diff --git a/.gitattributes b/.gitattributes
index 212fac85c84290fcb19246f7cf1237ad2d5bbf48..55c1f06aa87a8bd1f3618d9107ce30640716934d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1162,3 +1162,4 @@ vlmpy310/lib/python3.10/site-packages/tokenizers/tokenizers.abi3.so filter=lfs d
vlmpy310/lib/python3.10/site-packages/av.libs/libavformat-071c54bd.so.61.7.100 filter=lfs diff=lfs merge=lfs -text
vlmpy310/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
vlmpy310/lib/python3.10/site-packages/transformers/models/seamless_m4t/__pycache__/modeling_seamless_m4t.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+llava_next/lib/python3.10/site-packages/rich/__pycache__/_emoji_codes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/llava_next/lib/python3.10/site-packages/rich/__pycache__/_emoji_codes.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/rich/__pycache__/_emoji_codes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc73ab124f00b00580b3f58b5d6a2d91724c4a10
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/rich/__pycache__/_emoji_codes.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16bd89a08208feb8b84a13c0b42807214b18e854b07ae0350b47170185b22d87
+size 360026
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/configuration_bloom.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/configuration_bloom.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b3f08a153c3766a4c11b3ddee7cf1d602b8a696
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/configuration_bloom.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_bloom.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_bloom.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c8b8a2f988b7a2647a2873d4ee23b541632aa2c
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_bloom.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_flax_bloom.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_flax_bloom.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6cfdc5fc92238f6c305a347d22e11e4e666500b8
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_flax_bloom.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/tokenization_bloom_fast.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/tokenization_bloom_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..793603dd768abc1136cec2b39463e3c4e2da2fbc
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/tokenization_bloom_fast.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/bloom/configuration_bloom.py b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/configuration_bloom.py
new file mode 100644
index 0000000000000000000000000000000000000000..17395625e0177e640fa7ab48aab7756e8aa66d54
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/configuration_bloom.py
@@ -0,0 +1,242 @@
+# coding=utf-8
+# Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Bloom configuration"""
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, List, Mapping, Optional
+
+from packaging import version
+
+
+if TYPE_CHECKING:
+ from ... import PreTrainedTokenizer, TensorType
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfigWithPast, PatchingSpec
+from ...utils import is_torch_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
+ "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
+ "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
+ "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
+ "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
+ "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
+}
+
+
+class BloomConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`BloomModel`]. It is used to instantiate a Bloom
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to the Bloom architecture
+ [bigscience/bloom](https://huggingface.co/bigscience/bloom).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 250880):
+ Vocabulary size of the Bloom model. Defines the maximum number of different tokens that can be represented
+ by the `inputs_ids` passed when calling [`BloomModel`]. Check [this
+ discussion](https://huggingface.co/bigscience/bloom/discussions/120#633d28389addb8530b406c2a) on how the
+ `vocab_size` has been defined.
+ hidden_size (`int`, *optional*, defaults to 64):
+ Dimensionality of the embeddings and hidden states.
+ n_layer (`int`, *optional*, defaults to 2):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+ The epsilon to use in the layer normalization layers.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`):
+ If enabled, use the layer norm of the hidden states as the residual in the transformer blocks
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
+ Dropout rate of the dropout function on the bias dropout.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ Dropout rate applied to the attention probs
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ pretraining_tp (`int`, *optional*, defaults to `1`):
+ Experimental feature. Tensor parallelism rank used during pretraining with Megatron. Please refer to [this
+ document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+ necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+ issue](https://github.com/pytorch/pytorch/issues/76232). Note also that this is enabled only when
+ `slow_but_exact=True`.
+ slow_but_exact (`bool`, *optional*, defaults to `False`):
+ Experimental feature. Whether to use slow but exact implementation of the attention mechanism. While
+ merging the TP rank tensors, due to slicing operations the results may be slightly different between the
+ model trained on Megatron and our model. Please refer to [this
+ issue](https://github.com/pytorch/pytorch/issues/76232). A solution to obtain more accurate results is to
+            enable this feature. Enabling this will slow down inference. This will probably be resolved in the future
+            once the main model has been fine-tuned with TP_rank=1.
+
+ Example:
+
+ ```python
+ >>> from transformers import BloomConfig, BloomModel
+
+ >>> # Initializing a Bloom configuration
+ >>> configuration = BloomConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = BloomModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "bloom"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "num_hidden_layers": "n_layer",
+ "num_attention_heads": "n_head",
+ }
+
+ def __init__(
+ self,
+ vocab_size=250880,
+ hidden_size=64,
+ n_layer=2,
+ n_head=8,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ use_cache=True,
+ bos_token_id=1,
+ eos_token_id=2,
+ apply_residual_connection_post_layernorm=False,
+ hidden_dropout=0.0,
+ attention_dropout=0.0,
+ pretraining_tp=1, # TP rank used when training with megatron
+ slow_but_exact=False,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ # Backward compatibility with n_embed kwarg
+ n_embed = kwargs.pop("n_embed", None)
+ self.hidden_size = hidden_size if n_embed is None else n_embed
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_range = initializer_range
+ self.use_cache = use_cache
+ self.pretraining_tp = pretraining_tp
+ self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
+ self.hidden_dropout = hidden_dropout
+ self.attention_dropout = attention_dropout
+
+ self.bos_token_id = bos_token_id
+ self.eos_token_id = eos_token_id
+ self.slow_but_exact = slow_but_exact
+
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+
+class BloomOnnxConfig(OnnxConfigWithPast):
+ torch_onnx_minimum_version = version.parse("1.12")
+
+ def __init__(
+ self,
+ config: PretrainedConfig,
+ task: str = "default",
+ patching_specs: List[PatchingSpec] = None,
+ use_past: bool = False,
+ ):
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
+ if not getattr(self._config, "pad_token_id", None):
+ # TODO: how to do that better?
+ self._config.pad_token_id = 0
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
+ if self.use_past:
+ # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
+ self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
+ else:
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
+
+ return common_inputs
+
+ @property
+ def num_layers(self) -> int:
+ return self._config.n_layer
+
+ @property
+ def num_attention_heads(self) -> int:
+ return self._config.n_head
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-3
+
+ def generate_dummy_inputs(
+ self,
+ tokenizer: "PreTrainedTokenizer",
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional["TensorType"] = None,
+ ) -> Mapping[str, Any]:
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+        # We need to order the inputs in the way they appear in the forward()
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
+
+ # Need to add the past_keys
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+
+ batch, seqlen = common_inputs["input_ids"].shape
+ # Not using the same length for past_key_values
+ past_key_values_length = seqlen + 2
+ head_dim = self._config.hidden_size // self.num_attention_heads
+ past_key_shape = (
+ batch * self.num_attention_heads,
+ head_dim,
+ past_key_values_length,
+ )
+ past_value_shape = (
+ batch * self.num_attention_heads,
+ past_key_values_length,
+ head_dim,
+ )
+ ordered_inputs["past_key_values"] = [
+ (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
+ ]
+
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
+ if self.use_past:
+ mask_dtype = ordered_inputs["attention_mask"].dtype
+ ordered_inputs["attention_mask"] = torch.cat(
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
+ )
+
+ return ordered_inputs
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 13
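For reference, a minimal sketch of how the `BloomOnnxConfig` added above can be exercised (this assumes `torch`, a working `transformers` install, and Hub access for the tokenizer; the checkpoint name is only illustrative):

```python
from transformers import BloomConfig, BloomTokenizerFast, TensorType
from transformers.models.bloom.configuration_bloom import BloomOnnxConfig

# Small config matching the defaults above (n_layer=2, n_head=8, hidden_size=64).
config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
onnx_config = BloomOnnxConfig(config, task="default", use_past=True)

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=5, framework=TensorType.PYTORCH
)

# Keys are laid out as (batch * n_head, head_dim, past_len) and values as
# (batch * n_head, past_len, head_dim), matching generate_dummy_inputs above.
key, value = dummy["past_key_values"][0]
print(key.shape, value.shape, dummy["attention_mask"].shape)
```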
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..eda9a2d815e6b82add587035f9e8f2797bd5c748
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py
@@ -0,0 +1,255 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert BigScience BLOOM checkpoint."""
+
+
+import argparse
+import json
+import os
+import re
+
+import torch
+
+from transformers import BloomConfig, BloomModel
+from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+WEIGHTS_TO_AVERAGE_ENDSWITH = [
+ "word_embeddings_layernorm.weight",
+ "word_embeddings_layernorm.bias",
+ "input_layernorm.weight",
+ "input_layernorm.bias",
+ "post_attention_layernorm.weight",
+ "post_attention_layernorm.bias",
+ "self_attention.dense.bias",
+ "mlp.dense_4h_to_h.bias",
+ "ln_f.weight",
+ "ln_f.bias",
+]
+
+WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
+ "mlp.dense_4h_to_h.weight",
+ "self_attention.dense.weight",
+]
+
+
+def layer_name_mapping(key, file):
+ """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
+ # Handle first and last layers
+ layer_rename_map = {
+ "word_embeddings.weight": "word_embeddings.weight",
+ "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
+ "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
+ "weight": "ln_f.weight",
+ "bias": "ln_f.bias",
+ }
+
+ if key in layer_rename_map:
+ return layer_rename_map[key]
+
+ # Handle transformer blocks
+ layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
+ layer_number -= 3
+ return f"h.{layer_number}." + key
+
+
+def get_dtype_size(dtype):
+ if dtype == torch.bool:
+ return 1 / 8
+ bit_search = re.search(r"[^\d](\d+)$", str(dtype))
+ if bit_search is None:
+ raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
+ bit_size = int(bit_search.groups()[0])
+ return bit_size // 8
+
+
+def convert_bloom_checkpoint_to_pytorch(
+ bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
+):
+ # Construct model
+ if bloom_config_file == "":
+ config = BloomConfig()
+ else:
+ config = BloomConfig.from_json_file(bloom_config_file)
+
+ if shard_model:
+ file_names = os.listdir(bloom_checkpoint_path)
+ file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
+
+ index_dict = {"weight_map": {}, "metadata": {}}
+ total_size = 0
+
+ missing_keys = None
+
+ config = BloomConfig()
+
+ for j, file in enumerate(file_names):
+ print("Processing file: {}".format(file))
+ tensors = None
+
+ for i in range(pretraining_tp):
+ # load all TP files
+ f_name = file.replace("model_00", f"model_0{i}")
+ temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
+
+                # Rename keys to the transformers naming scheme
+ keys = list(temp.keys())
+ for key in keys:
+ temp[layer_name_mapping(key, file)] = temp.pop(key)
+
+ if tensors is None:
+ tensors = temp
+ else:
+ for key in tensors.keys():
+ if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
+                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
+ tensors[key] += temp[key]
+ else:
+ # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
+ cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
+                            # We concatenate these weights across TP ranks
+ tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
+
+            # Divide the weights we want to average by the number of TP ranks
+ for key in tensors.keys():
+ if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
+ tensors[key] = tensors[key] / pretraining_tp
+ torch.save(
+ tensors,
+ os.path.join(
+ pytorch_dump_folder_path,
+ "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
+ ),
+ )
+
+ for key in tensors.keys():
+ value = tensors[key]
+ total_size += value.numel() * get_dtype_size(value.dtype)
+ if key not in index_dict["weight_map"]:
+ index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
+ str(j + 1).zfill(5), str(len(file_names)).zfill(5)
+ )
+
+ config = BloomConfig()
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
+ index_dict["metadata"]["total_size"] = total_size
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+ with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
+ json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
+ f.write(json_config)
+ else:
+ model = BloomModel(config)
+
+ file_names = os.listdir(bloom_checkpoint_path)
+ file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
+
+ missing_keys = None
+ for i, file in enumerate(file_names):
+ tensors = None
+ for i in range(pretraining_tp):
+ # load all TP files
+ f_name = file.replace("model_00", f"model_0{i}")
+ temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
+
+                # Rename keys to the transformers naming scheme
+ keys = list(temp.keys())
+ for key in keys:
+ temp[layer_name_mapping(key, file)] = temp.pop(key)
+
+ if tensors is None:
+ tensors = temp
+ else:
+ for key in tensors.keys():
+                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
+ if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
+ tensors[key] += temp[key]
+ else:
+ # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
+ cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
+                            # We concatenate these weights across TP ranks
+ tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
+
+            # Divide the weights we want to average by the number of TP ranks
+ for key in tensors.keys():
+ if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
+ tensors[key] = tensors[key] / pretraining_tp
+
+ other_keys = model.load_state_dict(tensors, strict=False)
+ assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
+ if missing_keys is None:
+ missing_keys = set(other_keys.missing_keys)
+ else:
+ missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
+
+ assert not missing_keys, f"The keys {missing_keys} are missing"
+
+ # Save pytorch-model
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+ pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
+ print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
+ if config.torch_dtype is not None:
+ model = model.to(config.torch_dtype)
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
+ print(f"Save configuration file to {pytorch_config_dump_path}")
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--bloom_checkpoint_path",
+ default=None,
+ type=str,
+ required=True,
+        help="Path to the Megatron-LM checkpoint.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--bloom_config_file",
+ default="",
+ type=str,
+ help=(
+ "An optional config json file corresponding to the pre-trained model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--shard_model",
+ action="store_true",
+ help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
+ )
+ parser.add_argument(
+ "--pretraining_tp",
+ default=4,
+ type=int,
+ help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
+ )
+ args = parser.parse_args()
+ convert_bloom_checkpoint_to_pytorch(
+ args.bloom_checkpoint_path,
+ args.bloom_config_file,
+ args.pytorch_dump_folder_path,
+ args.shard_model,
+ args.pretraining_tp,
+ )
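A hedged sketch of invoking the converter above programmatically rather than through the `argparse` CLI (the paths are placeholders, not real checkpoints):

```python
from transformers.models.bloom.convert_bloom_original_checkpoint_to_pytorch import (
    convert_bloom_checkpoint_to_pytorch,
)

convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path="/data/bloom/megatron_ckpt",  # placeholder path
    bloom_config_file="",                               # "" falls back to a default BloomConfig()
    pytorch_dump_folder_path="/data/bloom/hf_dump",     # placeholder path
    shard_model=True,                                   # one shard per layer file plus an index.json
    pretraining_tp=4,                                   # TP degree used during Megatron pretraining
)
```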
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/bloom/tokenization_bloom_fast.py b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/tokenization_bloom_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0189e08b3d149e67f4f9cce4142c397fe811562
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/bloom/tokenization_bloom_fast.py
@@ -0,0 +1,177 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for Bloom."""
+
+
+import pickle
+from typing import Optional, Tuple
+
+from ...tokenization_utils_base import BatchEncoding
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "tokenizer_file": {
+ "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
+ "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
+ "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
+ "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
+ "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
+ "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
+ "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
+ },
+}
+
+
+class BloomTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" Bloom tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
+ Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
+
+ ```python
+ >>> from transformers import BloomTokenizerFast
+
+ >>> tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom")
+ >>> tokenizer("Hello world")["input_ids"]
+ [59414, 8876]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [86153, 8876]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
+ the model was not pretrained this way, it might yield a decrease in performance.
+
+    <Tip>
+
+    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
+
+    </Tip>
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
+ The beginning of sequence token.
+ eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
+ The end of sequence token.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial space to the input. This allows treating the leading word just like any
+            other word. (The Bloom tokenizer detects the beginning of words by the preceding space.)
+ trim_offsets (`bool`, *optional*, defaults to `True`):
+ Whether or not the post-processing step should trim offsets to avoid including whitespaces.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = None
+ # No `max_model_input_sizes` as BLOOM uses ALiBi positional embeddings
+
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token="<pad>",
+ add_prefix_space=False,
+ clean_up_tokenization_spaces=False,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ pad_token=pad_token,
+ add_prefix_space=add_prefix_space,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+ # TODO @ArthurZucker this can only work one way for now, to update later-on. Tests should also properly
+ # check this as they were green before.
+ pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer)
+ decoder_state = pickle.dumps(self.backend_tokenizer.decoder)
+
+ if add_prefix_space:
+ pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
+ decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
+ self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state)
+ self.backend_tokenizer.decoder = pickle.loads(decoder_state)
+
+ self.add_prefix_space = add_prefix_space
+
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+ if not (self.add_prefix_space or not is_split_into_words):
+ raise Exception(
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
+ " pretokenized inputs."
+ )
+
+ return super()._batch_encode_plus(*args, **kwargs)
+
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+
+ if not (self.add_prefix_space or not is_split_into_words):
+ raise Exception(
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
+ " pretokenized inputs."
+ )
+
+ return super()._encode_plus(*args, **kwargs)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
+
+ @property
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template
+ def default_chat_template(self):
+ """
+ A simple chat template that ignores role information and just concatenates messages with EOS tokens.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
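The `add_prefix_space` constraint enforced in `_batch_encode_plus`/`_encode_plus` above can be illustrated with a short sketch (assumes Hub access; the checkpoint name is illustrative):

```python
from transformers import BloomTokenizerFast

# Pretokenized input requires add_prefix_space=True, otherwise the methods above raise.
tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m", add_prefix_space=True)
encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding["input_ids"])
```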
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8140009b60f15680663fc61569f55675e6d71196
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__init__.py
@@ -0,0 +1,64 @@
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all.
+
+# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
+ "tokenization_cpmant": ["CpmAntTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_cpmant"] = [
+ "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "CpmAntForCausalLM",
+ "CpmAntModel",
+ "CpmAntPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
+ from .tokenization_cpmant import CpmAntTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_cpmant import (
+ CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ CpmAntForCausalLM,
+ CpmAntModel,
+ CpmAntPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/configuration_cpmant.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/configuration_cpmant.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22da238a45d9e3fb1d549f0f1dbcbde3eb1c7682
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/configuration_cpmant.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/modeling_cpmant.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/modeling_cpmant.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6905bbcd7c2ffc0ebdbf639f38c7a6b5d656535
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/modeling_cpmant.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/tokenization_cpmant.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/tokenization_cpmant.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5abd6c35e5a57a6377df8a0b79ca776dfa8eb649
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/__pycache__/tokenization_cpmant.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/tokenization_cpmant.py b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/tokenization_cpmant.py
new file mode 100644
index 0000000000000000000000000000000000000000..c10f48e2de282e1c1f69170f7c1c134441d0190e
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/cpmant/tokenization_cpmant.py
@@ -0,0 +1,278 @@
+# coding=utf-8
+# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for CPMAnt."""
+import collections
+import os
+from typing import List, Optional, Tuple
+
+from transformers.utils import is_jieba_available, requires_backends
+
+
+if is_jieba_available():
+ import jieba
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "openbmb/cpm-ant-10b": 1024,
+}
+
+
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+class WordpieceTokenizer(object):
+    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, token):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ return [self.unk_token]
+
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ sub_tokens.append(self.unk_token)
+ start += 1
+ else:
+ sub_tokens.append(cur_substr)
+ start = end
+
+ return sub_tokens
+
+
+class CpmAntTokenizer(PreTrainedTokenizer):
+ """
+ Construct a CPMAnt tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+        bod_token (`str`, *optional*, defaults to `"<d>"`):
+            The beginning of document token.
+        eod_token (`str`, *optional*, defaults to `"</d>"`):
+            The end of document token.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token.
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token.
+        line_token (`str`, *optional*, defaults to `"</n>"`):
+            The line token.
+        space_token (`str`, *optional*, defaults to `"</_>"`):
+            The space token.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+ add_prefix_space = False
+
+ def __init__(
+ self,
+ vocab_file,
+        bod_token="<d>",
+        eod_token="</d>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token="<pad>",
+        unk_token="<unk>",
+        line_token="</n>",
+        space_token="</_>",
+ padding_side="left",
+ **kwargs,
+ ):
+ requires_backends(self, ["jieba"])
+ self.bod_token = bod_token
+ self.eod_token = eod_token
+ self.encoder = load_vocab(vocab_file)
+ self.encoder[" "] = self.encoder[space_token]
+ self.encoder["\n"] = self.encoder[line_token]
+
+ del self.encoder[space_token]
+ del self.encoder[line_token]
+
+ self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
+ self.decoder = {v: k for k, v in self.encoder.items()}
+
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=unk_token)
+
+ super().__init__(
+ bod_token=bod_token,
+ eod_token=eod_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ pad_token=pad_token,
+ unk_token=unk_token,
+ line_token=line_token,
+ space_token=space_token,
+ padding_side=padding_side,
+ **kwargs,
+ )
+
+ @property
+ def bod_token_id(self):
+ return self.encoder[self.bod_token]
+
+ @property
+ def eod_token_id(self):
+ return self.encoder[self.eod_token]
+
+ @property
+ def newline_id(self):
+ return self.encoder["\n"]
+
+ @property
+ def vocab_size(self) -> int:
+ return len(self.encoder)
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ output_tokens = []
+ for x in jieba.cut(text, cut_all=False):
+ output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
+ return output_tokens
+
+ def _decode(self, token_ids, **kwargs):
+ """Decode ids into a string."""
+ token_ids = [i for i in token_ids if i >= 0]
+ token_ids = [
+ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
+ ]
+ return super()._decode(token_ids, **kwargs)
+
+ def check(self, token):
+ return token in self.encoder
+
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
+ return "".join(tokens)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ index = 0
+ if " " in self.encoder:
+            self.encoder["</_>"] = self.encoder[" "]
+ del self.encoder[" "]
+ if "\n" in self.encoder:
+            self.encoder["</n>"] = self.encoder["\n"]
+ del self.encoder["\n"]
+ self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in self.encoder.items():
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+ def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A CPMAnt sequence has the following format:
+
+ - single sequence: `[BOS] Sequence`.
+
+ Args:
+            token_ids_0 (`List[int]`): The first tokenized sequence to which special tokens will be added.
+            token_ids_1 (`List[int]`): The optional second tokenized sequence to which special tokens will be added.
+
+ Returns:
+ `List[int]`: The model input with special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.bos_token_id] + token_ids_0
+ return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`): List of IDs.
+ token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
+ return [1] + ([0] * len(token_ids_0))
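As a quick sanity check of the `[BOS] Sequence` format documented in `build_inputs_with_special_tokens` above, a minimal sketch (assumes the `jieba` dependency and Hub access; the checkpoint name is illustrative):

```python
from transformers import CpmAntTokenizer

tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
ids = tokenizer("今天天气真好").input_ids  # bos_token_id is prepended to the tokenized text
mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
print(ids[0] == tokenizer.bos_token_id, mask[0])  # expected: True 1
```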
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a560265f4bfcb8d43f88d2b3cd55f751409016ec
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py
@@ -0,0 +1,75 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"],
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_deformable_detr"] = ["DeformableDetrFeatureExtractor"]
+ _import_structure["image_processing_deformable_detr"] = ["DeformableDetrImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_deformable_detr"] = [
+ "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DeformableDetrForObjectDetection",
+ "DeformableDetrModel",
+ "DeformableDetrPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_deformable_detr import DeformableDetrFeatureExtractor
+ from .image_processing_deformable_detr import DeformableDetrImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_deformable_detr import (
+ DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
+ DeformableDetrForObjectDetection,
+ DeformableDetrModel,
+ DeformableDetrPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e69e8c4b8e50b4bdc8a18aaa07e00cdc76b6b951
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff3ba877aecc0db6d6a04da083df310742bebff0
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c095989770d20c7aca69b1d5e692ab7e0edb8c5
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6161061d9a746a825c59405542776b4ff0b34fa
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py
@@ -0,0 +1,263 @@
+# coding=utf-8
+# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Deformable DETR model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
+ # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
+}
+
+
+class DeformableDetrConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate
+ a Deformable DETR model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Deformable DETR
+ [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ use_timm_backbone (`bool`, *optional*, defaults to `True`):
+ Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
+ API.
+ backbone_config (`PretrainedConfig` or `dict`, *optional*):
+ The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
+ case it will default to `ResNetConfig()`.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ num_queries (`int`, *optional*, defaults to 300):
+ Number of object queries, i.e. detection slots. This is the maximal number of objects
+ [`DeformableDetrModel`] can detect in a single image. In case `two_stage` is set to `True`, we use
+ `two_stage_num_proposals` instead.
+ d_model (`int`, *optional*, defaults to 256):
+ Dimension of the layers.
+ encoder_layers (`int`, *optional*, defaults to 6):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 6):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 1024):
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 1024):
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ init_xavier_std (`float`, *optional*, defaults to 1):
+ The scaling factor used for the Xavier initialization gain in the HM Attention map module.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
+ position_embedding_type (`str`, *optional*, defaults to `"sine"`):
+ Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
+ backbone (`str`, *optional*, defaults to `"resnet50"`):
+ Name of convolutional backbone to use in case `use_timm_backbone` = `True`. Supports any convolutional
+ backbone from the timm package. For a list of all available models, see [this
+ page](https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model).
+ use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
+ Whether to use pretrained weights for the backbone. Only supported when `use_timm_backbone` = `True`.
+ dilation (`bool`, *optional*, defaults to `False`):
+ Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
+ `use_timm_backbone` = `True`.
+ class_cost (`float`, *optional*, defaults to 1):
+ Relative weight of the classification error in the Hungarian matching cost.
+ bbox_cost (`float`, *optional*, defaults to 5):
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
+ giou_cost (`float`, *optional*, defaults to 2):
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
+ mask_loss_coefficient (`float`, *optional*, defaults to 1):
+ Relative weight of the Focal loss in the panoptic segmentation loss.
+ dice_loss_coefficient (`float`, *optional*, defaults to 1):
+ Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5):
+ Relative weight of the L1 bounding box loss in the object detection loss.
+ giou_loss_coefficient (`float`, *optional*, defaults to 2):
+ Relative weight of the generalized IoU loss in the object detection loss.
+ eos_coefficient (`float`, *optional*, defaults to 0.1):
+ Relative classification weight of the 'no-object' class in the object detection loss.
+ num_feature_levels (`int`, *optional*, defaults to 4):
+ The number of input feature levels.
+ encoder_n_points (`int`, *optional*, defaults to 4):
+ The number of sampled keys in each feature level for each attention head in the encoder.
+ decoder_n_points (`int`, *optional*, defaults to 4):
+ The number of sampled keys in each feature level for each attention head in the decoder.
+ two_stage (`bool`, *optional*, defaults to `False`):
+ Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
+ Deformable DETR, which are further fed into the decoder for iterative bounding box refinement.
+ two_stage_num_proposals (`int`, *optional*, defaults to 300):
+ The number of region proposals to be generated, in case `two_stage` is set to `True`.
+ with_box_refine (`bool`, *optional*, defaults to `False`):
+ Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
+ based on the predictions from the previous layer.
+ focal_alpha (`float`, *optional*, defaults to 0.25):
+ Alpha parameter in the focal loss.
+ disable_custom_kernels (`bool`, *optional*, defaults to `False`):
+ Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
+ kernels are not supported by PyTorch ONNX export.
+
+ Examples:
+
+ ```python
+ >>> from transformers import DeformableDetrConfig, DeformableDetrModel
+
+ >>> # Initializing a Deformable DETR SenseTime/deformable-detr style configuration
+ >>> configuration = DeformableDetrConfig()
+
+ >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
+ >>> model = DeformableDetrModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "deformable_detr"
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "encoder_attention_heads",
+ }
+
+ def __init__(
+ self,
+ use_timm_backbone=True,
+ backbone_config=None,
+ num_channels=3,
+ num_queries=300,
+ max_position_embeddings=1024,
+ encoder_layers=6,
+ encoder_ffn_dim=1024,
+ encoder_attention_heads=8,
+ decoder_layers=6,
+ decoder_ffn_dim=1024,
+ decoder_attention_heads=8,
+ encoder_layerdrop=0.0,
+ is_encoder_decoder=True,
+ activation_function="relu",
+ d_model=256,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ init_xavier_std=1.0,
+ return_intermediate=True,
+ auxiliary_loss=False,
+ position_embedding_type="sine",
+ backbone="resnet50",
+ use_pretrained_backbone=True,
+ dilation=False,
+ num_feature_levels=4,
+ encoder_n_points=4,
+ decoder_n_points=4,
+ two_stage=False,
+ two_stage_num_proposals=300,
+ with_box_refine=False,
+ class_cost=1,
+ bbox_cost=5,
+ giou_cost=2,
+ mask_loss_coefficient=1,
+ dice_loss_coefficient=1,
+ bbox_loss_coefficient=5,
+ giou_loss_coefficient=2,
+ eos_coefficient=0.1,
+ focal_alpha=0.25,
+ disable_custom_kernels=False,
+ **kwargs,
+ ):
+ if backbone_config is not None and use_timm_backbone:
+ raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
+
+ if not use_timm_backbone:
+ if backbone_config is None:
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
+ backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
+ elif isinstance(backbone_config, dict):
+ backbone_model_type = backbone_config.get("model_type")
+ config_class = CONFIG_MAPPING[backbone_model_type]
+ backbone_config = config_class.from_dict(backbone_config)
+ self.use_timm_backbone = use_timm_backbone
+ self.backbone_config = backbone_config
+ self.num_channels = num_channels
+ self.num_queries = num_queries
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.init_xavier_std = init_xavier_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.auxiliary_loss = auxiliary_loss
+ self.position_embedding_type = position_embedding_type
+ self.backbone = backbone
+ self.use_pretrained_backbone = use_pretrained_backbone
+ self.dilation = dilation
+ # deformable attributes
+ self.num_feature_levels = num_feature_levels
+ self.encoder_n_points = encoder_n_points
+ self.decoder_n_points = decoder_n_points
+ self.two_stage = two_stage
+ self.two_stage_num_proposals = two_stage_num_proposals
+ self.with_box_refine = with_box_refine
+ if two_stage is True and with_box_refine is False:
+ raise ValueError("If two_stage is True, with_box_refine must be True.")
+ # Hungarian matcher
+ self.class_cost = class_cost
+ self.bbox_cost = bbox_cost
+ self.giou_cost = giou_cost
+ # Loss coefficients
+ self.mask_loss_coefficient = mask_loss_coefficient
+ self.dice_loss_coefficient = dice_loss_coefficient
+ self.bbox_loss_coefficient = bbox_loss_coefficient
+ self.giou_loss_coefficient = giou_loss_coefficient
+ self.eos_coefficient = eos_coefficient
+ self.focal_alpha = focal_alpha
+ self.disable_custom_kernels = disable_custom_kernels
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+ @property
+ def num_attention_heads(self) -> int:
+ return self.encoder_attention_heads
+
+ @property
+ def hidden_size(self) -> int:
+ return self.d_model
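+
+
+# Illustrative usage sketch (not part of the upstream module; values are hypothetical): the
+# two-stage variant requires iterative box refinement, so the configuration below is valid,
+# while `two_stage=True` together with `with_box_refine=False` raises a ValueError in __init__.
+# >>> config = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=300)
+# >>> config.two_stage_num_proposals
+# 300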
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py
new file mode 100644
index 0000000000000000000000000000000000000000..f04743e91ceefe5fbad2485e9767f0a97dd6db49
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for Deformable DETR."""
+
+import warnings
+
+from ...image_transforms import rgb_to_id as _rgb_to_id
+from ...utils import logging
+from .image_processing_deformable_detr import DeformableDetrImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+def rgb_to_id(x):
+ warnings.warn(
+ "rgb_to_id has moved and will not be importable from this module from v5. "
+ "Please import from transformers.image_transforms instead.",
+ FutureWarning,
+ )
+ return _rgb_to_id(x)
+
+
+class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use DeformableDetrImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
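+
+
+# Migration sketch (illustrative only): the deprecated class still works but emits a
+# FutureWarning; new code should construct the image processor directly.
+# >>> from transformers import DeformableDetrImageProcessor
+# >>> image_processor = DeformableDetrImageProcessor()  # preferred over DeformableDetrFeatureExtractor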
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c40d20c816ad3a13d26e04b39824ac85453e0f7
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py
@@ -0,0 +1,1429 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Deformable DETR."""
+
+import io
+import pathlib
+from collections import defaultdict
+from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
+
+import numpy as np
+
+from ...feature_extraction_utils import BatchFeature
+from ...image_processing_utils import BaseImageProcessor, get_size_dict
+from ...image_transforms import (
+ PaddingMode,
+ center_to_corners_format,
+ corners_to_center_format,
+ id_to_rgb,
+ pad,
+ rescale,
+ resize,
+ rgb_to_id,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_DEFAULT_MEAN,
+ IMAGENET_DEFAULT_STD,
+ AnnotationFormat,
+ AnnotationType,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_annotations,
+)
+from ...utils import (
+ TensorType,
+ is_flax_available,
+ is_jax_tensor,
+ is_scipy_available,
+ is_tf_available,
+ is_tf_tensor,
+ is_torch_available,
+ is_torch_tensor,
+ is_vision_available,
+ logging,
+)
+
+
+if is_torch_available():
+ import torch
+ from torch import nn
+
+
+if is_vision_available():
+ import PIL
+
+if is_scipy_available():
+ import scipy.special
+ import scipy.stats
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
+def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
+ """
+ Computes the output image size given the input image size and the desired output size.
+
+ Args:
+ image_size (`Tuple[int, int]`):
+ The input image size.
+ size (`int`):
+ The desired output size.
+ max_size (`int`, *optional*):
+ The maximum allowed output size.
+ """
+ height, width = image_size
+ if max_size is not None:
+ min_original_size = float(min((height, width)))
+ max_original_size = float(max((height, width)))
+ if max_original_size / min_original_size * size > max_size:
+ size = int(round(max_size * min_original_size / max_original_size))
+
+ if (height <= width and height == size) or (width <= height and width == size):
+ return height, width
+
+ if width < height:
+ ow = size
+ oh = int(size * height / width)
+ else:
+ oh = size
+ ow = int(size * width / height)
+ return (oh, ow)
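+
+
+# Illustrative example (not part of the upstream module): a 480x640 image with the default
+# shortest_edge=800 / longest_edge=1333 settings keeps its aspect ratio, matching the shorter
+# side to 800 and scaling the longer side accordingly.
+# >>> get_size_with_aspect_ratio((480, 640), 800, max_size=1333)
+# (800, 1066)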
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
+def get_resize_output_image_size(
+ input_image: np.ndarray,
+ size: Union[int, Tuple[int, int], List[int]],
+ max_size: Optional[int] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+) -> Tuple[int, int]:
+ """
+ Computes the output image size given the input image size and the desired output size. If the desired output size
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
+ image size is computed by keeping the aspect ratio of the input image size.
+
+ Args:
+ input_image (`np.ndarray`):
+ The image to resize.
+ size (`int` or `Tuple[int, int]` or `List[int]`):
+ The desired output size.
+ max_size (`int`, *optional*):
+ The maximum allowed output size.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
+ """
+ image_size = get_image_size(input_image, input_data_format)
+ if isinstance(size, (list, tuple)):
+ return size
+
+ return get_size_with_aspect_ratio(image_size, size, max_size)
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
+def get_numpy_to_framework_fn(arr) -> Callable:
+ """
+ Returns a function that converts a numpy array to the framework of the input array.
+
+ Args:
+ arr (`np.ndarray`): The array to convert.
+ """
+ if isinstance(arr, np.ndarray):
+ return np.array
+ if is_tf_available() and is_tf_tensor(arr):
+ import tensorflow as tf
+
+ return tf.convert_to_tensor
+ if is_torch_available() and is_torch_tensor(arr):
+ import torch
+
+ return torch.tensor
+ if is_flax_available() and is_jax_tensor(arr):
+ import jax.numpy as jnp
+
+ return jnp.array
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
+
+
+# Copied from transformers.models.detr.image_processing_detr.safe_squeeze
+def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
+ """
+ Squeezes an array, but only if the axis specified has dim 1.
+ """
+ if axis is None:
+ return arr.squeeze()
+
+ try:
+ return arr.squeeze(axis=axis)
+ except ValueError:
+ return arr
+
+
+# Copied from transformers.models.detr.image_processing_detr.normalize_annotation
+def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
+ image_height, image_width = image_size
+ norm_annotation = {}
+ for key, value in annotation.items():
+ if key == "boxes":
+ boxes = value
+ boxes = corners_to_center_format(boxes)
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
+ norm_annotation[key] = boxes
+ else:
+ norm_annotation[key] = value
+ return norm_annotation
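+
+
+# Illustrative example (hypothetical values): a corner-format box [x0, y0, x1, y1] on a
+# 200x400 (height x width) image becomes a normalized center-format box [cx, cy, w, h].
+# >>> annotation = {"boxes": np.array([[0.0, 0.0, 100.0, 200.0]], dtype=np.float32)}
+# >>> normalize_annotation(annotation, image_size=(200, 400))["boxes"]
+# -> [[0.125, 0.5, 0.25, 1.0]]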
+
+
+# Copied from transformers.models.detr.image_processing_detr.max_across_indices
+def max_across_indices(values: Iterable[Any]) -> List[Any]:
+ """
+ Return the maximum value across all indices of an iterable of values.
+ """
+ return [max(values_i) for values_i in zip(*values)]
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_max_height_width
+def get_max_height_width(
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
+) -> List[int]:
+ """
+ Get the maximum height and width across all images in a batch.
+ """
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if input_data_format == ChannelDimension.FIRST:
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
+ elif input_data_format == ChannelDimension.LAST:
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
+ else:
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
+ return (max_height, max_width)
+
+
+# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
+def make_pixel_mask(
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
+) -> np.ndarray:
+ """
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
+
+ Args:
+ image (`np.ndarray`):
+ Image to make the pixel mask for.
+ output_size (`Tuple[int, int]`):
+ Output size of the mask.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ mask = np.zeros(output_size, dtype=np.int64)
+ mask[:input_height, :input_width] = 1
+ return mask
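+
+
+# Illustrative example (hypothetical shapes): a 2x3 image padded into a 4x4 canvas gets a
+# mask with 1s over the valid pixels and 0s over the padding.
+# >>> make_pixel_mask(np.zeros((2, 3, 3)), output_size=(4, 4), input_data_format="channels_last")
+# -> 4x4 array with mask[:2, :3] == 1 and zeros elsewhere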
+
+
+# Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
+def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
+ """
+ Convert a COCO polygon annotation to a mask.
+
+ Args:
+ segmentations (`List[List[float]]`):
+ List of polygons, each polygon represented by a list of x-y coordinates.
+ height (`int`):
+ Height of the mask.
+ width (`int`):
+ Width of the mask.
+ """
+ try:
+ from pycocotools import mask as coco_mask
+ except ImportError:
+ raise ImportError("Pycocotools is not installed in your environment.")
+
+ masks = []
+ for polygons in segmentations:
+ rles = coco_mask.frPyObjects(polygons, height, width)
+ mask = coco_mask.decode(rles)
+ if len(mask.shape) < 3:
+ mask = mask[..., None]
+ mask = np.asarray(mask, dtype=np.uint8)
+ mask = np.any(mask, axis=2)
+ masks.append(mask)
+ if masks:
+ masks = np.stack(masks, axis=0)
+ else:
+ masks = np.zeros((0, height, width), dtype=np.uint8)
+
+ return masks
+
+
+# Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DeformableDetr
+def prepare_coco_detection_annotation(
+ image,
+ target,
+ return_segmentation_masks: bool = False,
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
+):
+ """
+ Convert the target in COCO format into the format expected by DeformableDetr.
+ """
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
+
+ image_id = target["image_id"]
+ image_id = np.asarray([image_id], dtype=np.int64)
+
+ # Get all COCO annotations for the given image.
+ annotations = target["annotations"]
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
+
+ classes = [obj["category_id"] for obj in annotations]
+ classes = np.asarray(classes, dtype=np.int64)
+
+ # for conversion to coco api
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
+
+ boxes = [obj["bbox"] for obj in annotations]
+ # guard against no boxes via resizing
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
+ boxes[:, 2:] += boxes[:, :2]
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
+
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
+
+ new_target = {}
+ new_target["image_id"] = image_id
+ new_target["class_labels"] = classes[keep]
+ new_target["boxes"] = boxes[keep]
+ new_target["area"] = area[keep]
+ new_target["iscrowd"] = iscrowd[keep]
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
+
+ if annotations and "keypoints" in annotations[0]:
+ keypoints = [obj["keypoints"] for obj in annotations]
+ # Converting the filtered keypoints list to a numpy array
+ keypoints = np.asarray(keypoints, dtype=np.float32)
+ # Apply the keep mask here to filter the relevant annotations
+ keypoints = keypoints[keep]
+ num_keypoints = keypoints.shape[0]
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
+ new_target["keypoints"] = keypoints
+
+ if return_segmentation_masks:
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
+ new_target["masks"] = masks[keep]
+
+ return new_target
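+
+
+# Illustrative input sketch (hypothetical values): the COCO-style `target` expected above is a
+# dict with an image id and a list of per-object annotations, e.g.
+# >>> target = {
+# ...     "image_id": 42,
+# ...     "annotations": [{"bbox": [10, 20, 30, 40], "category_id": 1, "area": 1200.0, "iscrowd": 0}],
+# ... }
+# The returned dict contains `class_labels`, `boxes` (corner format, clipped to the image),
+# `area`, `iscrowd` and `orig_size` as numpy arrays.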
+
+
+# Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
+def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
+ """
+ Compute the bounding boxes around the provided panoptic segmentation masks.
+
+ Args:
+ masks: masks in format `[number_masks, height, width]`, where `number_masks` is the number of masks
+
+ Returns:
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
+ """
+ if masks.size == 0:
+ return np.zeros((0, 4))
+
+ h, w = masks.shape[-2:]
+ y = np.arange(0, h, dtype=np.float32)
+ x = np.arange(0, w, dtype=np.float32)
+ # see https://github.com/pytorch/pytorch/issues/50276
+ y, x = np.meshgrid(y, x, indexing="ij")
+
+ x_mask = masks * np.expand_dims(x, axis=0)
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
+ x_min = x.filled(fill_value=1e8)
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
+
+ y_mask = masks * np.expand_dims(y, axis=0)
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
+ y_min = y.filled(fill_value=1e8)
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
+
+ return np.stack([x_min, y_min, x_max, y_max], 1)
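+
+
+# Illustrative example (hypothetical mask): a single 4x4 mask set on rows 1-2 and columns 1-3
+# yields the enclosing box in xyxy pixel-index format.
+# >>> mask = np.zeros((1, 4, 4)); mask[0, 1:3, 1:4] = 1
+# >>> masks_to_boxes(mask)
+# -> [[1., 1., 3., 2.]]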
+
+
+# Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DeformableDetr
+def prepare_coco_panoptic_annotation(
+ image: np.ndarray,
+ target: Dict,
+ masks_path: Union[str, pathlib.Path],
+ return_masks: bool = True,
+ input_data_format: Union[ChannelDimension, str] = None,
+) -> Dict:
+ """
+ Prepare a coco panoptic annotation for DeformableDetr.
+ """
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
+
+ new_target = {}
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
+
+ if "segments_info" in target:
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
+ masks = rgb_to_id(masks)
+
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
+ masks = masks == ids[:, None, None]
+ masks = masks.astype(np.uint8)
+ if return_masks:
+ new_target["masks"] = masks
+ new_target["boxes"] = masks_to_boxes(masks)
+ new_target["class_labels"] = np.array(
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
+ )
+ new_target["iscrowd"] = np.asarray(
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
+ )
+ new_target["area"] = np.asarray(
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
+ )
+
+ return new_target
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_segmentation_image
+def get_segmentation_image(
+ masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
+):
+ h, w = input_size
+ final_h, final_w = target_size
+
+ m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
+
+ if m_id.shape[-1] == 0:
+ # We didn't detect any mask :(
+ m_id = np.zeros((h, w), dtype=np.int64)
+ else:
+ m_id = m_id.argmax(-1).reshape(h, w)
+
+ if deduplicate:
+ # Merge the masks corresponding to the same stuff class
+ for equiv in stuff_equiv_classes.values():
+ for eq_id in equiv:
+ m_id[m_id == eq_id] = equiv[0]
+
+ seg_img = id_to_rgb(m_id)
+ seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
+ return seg_img
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_mask_area
+def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
+ final_h, final_w = target_size
+ np_seg_img = seg_img.astype(np.uint8)
+ np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
+ m_id = rgb_to_id(np_seg_img)
+ area = [(m_id == i).sum() for i in range(n_classes)]
+ return area
+
+
+# Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
+def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+ probs = scipy.special.softmax(logits, axis=-1)
+ labels = probs.argmax(-1, keepdims=True)
+ scores = np.take_along_axis(probs, labels, axis=-1)
+ scores, labels = scores.squeeze(-1), labels.squeeze(-1)
+ return scores, labels
+
+
+# Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample
+def post_process_panoptic_sample(
+ out_logits: np.ndarray,
+ masks: np.ndarray,
+ boxes: np.ndarray,
+ processed_size: Tuple[int, int],
+ target_size: Tuple[int, int],
+ is_thing_map: Dict,
+ threshold=0.85,
+) -> Dict:
+ """
+ Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample.
+
+ Args:
+ out_logits (`torch.Tensor`):
+ The logits for this sample.
+ masks (`torch.Tensor`):
+ The predicted segmentation masks for this sample.
+ boxes (`torch.Tensor`):
+ The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
+ width, height)` with values in `[0, 1]`, relative to the size of the image (disregarding padding).
+ processed_size (`Tuple[int, int]`):
+ The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
+ after data augmentation but before batching.
+ target_size (`Tuple[int, int]`):
+ The target size of the image, `(height, width)` corresponding to the requested final size of the
+ prediction.
+ is_thing_map (`Dict`):
+ A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
+ threshold (`float`, *optional*, defaults to 0.85):
+ The threshold used to binarize the segmentation masks.
+ """
+ # we filter empty queries and detection below threshold
+ scores, labels = score_labels_from_class_probabilities(out_logits)
+ keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
+
+ cur_scores = scores[keep]
+ cur_classes = labels[keep]
+ cur_boxes = center_to_corners_format(boxes[keep])
+
+ if len(cur_boxes) != len(cur_classes):
+ raise ValueError("Not as many boxes as there are classes")
+
+ cur_masks = masks[keep]
+ cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
+ cur_masks = safe_squeeze(cur_masks, 1)
+ b, h, w = cur_masks.shape
+
+ # It may be that we have several predicted masks for the same stuff class.
+ # In the following, we track the list of masks ids for each stuff class (they are merged later on)
+ cur_masks = cur_masks.reshape(b, -1)
+ stuff_equiv_classes = defaultdict(list)
+ for k, label in enumerate(cur_classes):
+ if not is_thing_map[label]:
+ stuff_equiv_classes[label].append(k)
+
+ seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
+ area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
+
+ # We filter out any mask that is too small
+ if cur_classes.size > 0:
+ # We now filter empty masks as long as we find some
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
+ while filtered_small.any():
+ cur_masks = cur_masks[~filtered_small]
+ cur_scores = cur_scores[~filtered_small]
+ cur_classes = cur_classes[~filtered_small]
+ seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
+ area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
+ else:
+ cur_classes = np.ones((1, 1), dtype=np.int64)
+
+ segments_info = [
+ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
+ for i, (cat, a) in enumerate(zip(cur_classes, area))
+ ]
+ del cur_classes
+
+ with io.BytesIO() as out:
+ PIL.Image.fromarray(seg_img).save(out, format="PNG")
+ predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
+
+ return predictions
+
+
+# Copied from transformers.models.detr.image_processing_detr.resize_annotation
+def resize_annotation(
+ annotation: Dict[str, Any],
+ orig_size: Tuple[int, int],
+ target_size: Tuple[int, int],
+ threshold: float = 0.5,
+ resample: PILImageResampling = PILImageResampling.NEAREST,
+):
+ """
+ Resizes an annotation to a target size.
+
+ Args:
+ annotation (`Dict[str, Any]`):
+ The annotation dictionary.
+ orig_size (`Tuple[int, int]`):
+ The original size of the input image.
+ target_size (`Tuple[int, int]`):
+ The target size of the image, as returned by the preprocessing `resize` step.
+ threshold (`float`, *optional*, defaults to 0.5):
+ The threshold used to binarize the segmentation masks.
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
+ The resampling filter to use when resizing the masks.
+ """
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
+ ratio_height, ratio_width = ratios
+
+ new_annotation = {}
+ new_annotation["size"] = target_size
+
+ for key, value in annotation.items():
+ if key == "boxes":
+ boxes = value
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
+ new_annotation["boxes"] = scaled_boxes
+ elif key == "area":
+ area = value
+ scaled_area = area * (ratio_width * ratio_height)
+ new_annotation["area"] = scaled_area
+ elif key == "masks":
+ masks = value[:, None]
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
+ masks = masks.astype(np.float32)
+ masks = masks[:, 0] > threshold
+ new_annotation["masks"] = masks
+ elif key == "size":
+ new_annotation["size"] = target_size
+ else:
+ new_annotation[key] = value
+
+ return new_annotation
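+
+
+# Illustrative example (hypothetical values): doubling both sides scales the box coordinates by
+# 2, the areas by 4, and resizes then re-binarizes any masks.
+# >>> annotation = {"boxes": np.array([[10.0, 10.0, 20.0, 20.0]]), "area": np.array([100.0])}
+# >>> resize_annotation(annotation, orig_size=(100, 100), target_size=(200, 200))
+# -> {"size": (200, 200), "boxes": [[20., 20., 40., 40.]], "area": [400.]}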
+
+
+# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
+def binary_mask_to_rle(mask):
+ """
+ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.
+
+ Args:
+ mask (`torch.Tensor` or `numpy.array`):
+ A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
+ segment_id or class_id.
+ Returns:
+ `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
+ format.
+ """
+ if is_torch_tensor(mask):
+ mask = mask.numpy()
+
+ pixels = mask.flatten()
+ pixels = np.concatenate([[0], pixels, [0]])
+ runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
+ runs[1::2] -= runs[::2]
+ return list(runs)
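+
+
+# Illustrative example (hypothetical mask): the RLE alternates (1-based start, length) pairs for
+# the runs of 1s in the flattened mask.
+# >>> binary_mask_to_rle(np.array([[0, 1, 1], [0, 1, 0]]))
+# [2, 2, 5, 1]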
+
+
+# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
+def convert_segmentation_to_rle(segmentation):
+ """
+ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
+
+ Args:
+ segmentation (`torch.Tensor` or `numpy.array`):
+ A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
+ Returns:
+ `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
+ """
+ segment_ids = torch.unique(segmentation)
+
+ run_length_encodings = []
+ for idx in segment_ids:
+ mask = torch.where(segmentation == idx, 1, 0)
+ rle = binary_mask_to_rle(mask)
+ run_length_encodings.append(rle)
+
+ return run_length_encodings
+
+
+# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
+def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
+ """
+ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and
+ `labels`.
+
+ Args:
+ masks (`torch.Tensor`):
+ A tensor of shape `(num_queries, height, width)`.
+ scores (`torch.Tensor`):
+ A tensor of shape `(num_queries)`.
+ labels (`torch.Tensor`):
+ A tensor of shape `(num_queries)`.
+ object_mask_threshold (`float`):
+ A number between 0 and 1 used to binarize the masks.
+ Raises:
+ `ValueError`: Raised when the first dimension doesn't match in all input tensors.
+ Returns:
+ `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region
+ < `object_mask_threshold`.
+ """
+ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
+ raise ValueError("mask, scores and labels must have the same shape!")
+
+ to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
+
+ return masks[to_keep], scores[to_keep], labels[to_keep]
+
+
+# Copied from transformers.models.detr.image_processing_detr.check_segment_validity
+def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
+ # Get the mask associated with the k class
+ mask_k = mask_labels == k
+ mask_k_area = mask_k.sum()
+
+ # Compute the area of all the stuff in query k
+ original_area = (mask_probs[k] >= mask_threshold).sum()
+ mask_exists = mask_k_area > 0 and original_area > 0
+
+ # Eliminate disconnected tiny segments
+ if mask_exists:
+ area_ratio = mask_k_area / original_area
+ if not area_ratio.item() > overlap_mask_area_threshold:
+ mask_exists = False
+
+ return mask_exists, mask_k
+
+
+# Copied from transformers.models.detr.image_processing_detr.compute_segments
+def compute_segments(
+ mask_probs,
+ pred_scores,
+ pred_labels,
+ mask_threshold: float = 0.5,
+ overlap_mask_area_threshold: float = 0.8,
+ label_ids_to_fuse: Optional[Set[int]] = None,
+ target_size: Tuple[int, int] = None,
+):
+ height = mask_probs.shape[1] if target_size is None else target_size[0]
+ width = mask_probs.shape[2] if target_size is None else target_size[1]
+
+ segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
+ segments: List[Dict] = []
+
+ if target_size is not None:
+ mask_probs = nn.functional.interpolate(
+ mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
+ )[0]
+
+ current_segment_id = 0
+
+ # Weigh each mask by its prediction score
+ mask_probs *= pred_scores.view(-1, 1, 1)
+ mask_labels = mask_probs.argmax(0) # [height, width]
+
+ # Keep track of instances of each class
+ stuff_memory_list: Dict[str, int] = {}
+ for k in range(pred_labels.shape[0]):
+ pred_class = pred_labels[k].item()
+ should_fuse = pred_class in label_ids_to_fuse
+
+ # Check if mask exists and large enough to be a segment
+ mask_exists, mask_k = check_segment_validity(
+ mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
+ )
+
+ if mask_exists:
+ if pred_class in stuff_memory_list:
+ current_segment_id = stuff_memory_list[pred_class]
+ else:
+ current_segment_id += 1
+
+ # Add current object segment to final segmentation map
+ segmentation[mask_k] = current_segment_id
+ segment_score = round(pred_scores[k].item(), 6)
+ segments.append(
+ {
+ "id": current_segment_id,
+ "label_id": pred_class,
+ "was_fused": should_fuse,
+ "score": segment_score,
+ }
+ )
+ if should_fuse:
+ stuff_memory_list[pred_class] = current_segment_id
+
+ return segmentation, segments
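+
+
+# Illustrative output sketch: `segmentation` is an integer map of segment ids at the target
+# resolution, and each entry of `segments` is a dict such as (hypothetical values)
+# {"id": 1, "label_id": 17, "was_fused": False, "score": 0.994321}.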
+
+
+class DeformableDetrImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Deformable DETR image processor.
+
+ Args:
+ format (`str`, *optional*, defaults to `"coco_detection"`):
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
+ overridden by the `do_resize` parameter in the `preprocess` method.
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
+ the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Resampling filter to use if resizing the image.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
+ `do_rescale` parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
+ `preprocess` method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be
+ overridden by the `do_pad` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values", "pixel_mask"]
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
+ def __init__(
+ self,
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Union[float, List[float]] = None,
+ image_std: Union[float, List[float]] = None,
+ do_pad: bool = True,
+ **kwargs,
+ ) -> None:
+ if "pad_and_return_pixel_mask" in kwargs:
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
+
+ if "max_size" in kwargs:
+ logger.warning_once(
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
+ "Please specify in `size['longest_edge'] instead`.",
+ )
+ max_size = kwargs.pop("max_size")
+ else:
+ max_size = None if size is None else 1333
+
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
+
+ super().__init__(**kwargs)
+ self.format = format
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+ self.do_pad = do_pad
+
+ @classmethod
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->DeformableDetr
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
+ """
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
+ created using from_dict and kwargs e.g. `DeformableDetrImageProcessor.from_pretrained(checkpoint, size=600,
+ max_size=800)`
+ """
+ image_processor_dict = image_processor_dict.copy()
+ if "max_size" in kwargs:
+ image_processor_dict["max_size"] = kwargs.pop("max_size")
+ if "pad_and_return_pixel_mask" in kwargs:
+ image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
+ return super().from_dict(image_processor_dict, **kwargs)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DeformableDetr
+ def prepare_annotation(
+ self,
+ image: np.ndarray,
+ target: Dict,
+ format: Optional[AnnotationFormat] = None,
+ return_segmentation_masks: bool = None,
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> Dict:
+ """
+ Prepare an annotation for feeding into DeformableDetr model.
+ """
+ format = format if format is not None else self.format
+
+ if format == AnnotationFormat.COCO_DETECTION:
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
+ target = prepare_coco_detection_annotation(
+ image, target, return_segmentation_masks, input_data_format=input_data_format
+ )
+ elif format == AnnotationFormat.COCO_PANOPTIC:
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
+ target = prepare_coco_panoptic_annotation(
+ image,
+ target,
+ masks_path=masks_path,
+ return_masks=return_segmentation_masks,
+ input_data_format=input_data_format,
+ )
+ else:
+ raise ValueError(f"Format {format} is not supported.")
+ return target
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
+ logger.warning_once(
+ "The `prepare` method is deprecated and will be removed in a v4.33. "
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
+ "does not return the image anymore.",
+ )
+ target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
+ return image, target
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
+ return convert_coco_poly_to_mask(*args, **kwargs)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection
+ def prepare_coco_detection(self, *args, **kwargs):
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
+ return prepare_coco_detection_annotation(*args, **kwargs)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
+ def prepare_coco_panoptic(self, *args, **kwargs):
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
+ int, smaller edge of the image will be matched to this number.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or
+ `height` and `width`.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Resampling filter to use if resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ if "max_size" in kwargs:
+ logger.warning_once(
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
+ "Please specify in `size['longest_edge'] instead`.",
+ )
+ max_size = kwargs.pop("max_size")
+ else:
+ max_size = None
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
+ if "shortest_edge" in size and "longest_edge" in size:
+ size = get_resize_output_image_size(
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
+ )
+ elif "height" in size and "width" in size:
+ size = (size["height"], size["width"])
+ else:
+ raise ValueError(
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
+ f" {size.keys()}."
+ )
+ image = resize(
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
+ )
+ return image
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
+ def resize_annotation(
+ self,
+ annotation,
+ orig_size,
+ size,
+ resample: PILImageResampling = PILImageResampling.NEAREST,
+ ) -> Dict:
+ """
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
+ to this number.
+ """
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
+ def rescale(
+ self,
+ image: np.ndarray,
+ rescale_factor: float,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Rescale the image by the given factor. image = image * rescale_factor.
+
+ Args:
+ image (`np.ndarray`):
+ Image to rescale.
+ rescale_factor (`float`):
+ The value to use for rescaling.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
+ one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ """
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
+ """
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
+ `[center_x, center_y, width, height]` format.
+ """
+ return normalize_annotation(annotation, image_size=image_size)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
+ def _pad_image(
+ self,
+ image: np.ndarray,
+ output_size: Tuple[int, int],
+ constant_values: Union[float, Iterable[float]] = 0,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Pad an image with zeros to the given size.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ output_height, output_width = output_size
+
+ pad_bottom = output_height - input_height
+ pad_right = output_width - input_width
+ padding = ((0, pad_bottom), (0, pad_right))
+ padded_image = pad(
+ image,
+ padding,
+ mode=PaddingMode.CONSTANT,
+ constant_values=constant_values,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ return padded_image
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
+ def pad(
+ self,
+ images: List[np.ndarray],
+ constant_values: Union[float, Iterable[float]] = 0,
+ return_pixel_mask: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> BatchFeature:
+ """
+ Pads a batch of images, on the bottom and right, with zeros up to the largest height and width
+ in the batch, and optionally returns their corresponding pixel mask.
+
+ Args:
+ images (`List[np.ndarray]`):
+ Images to pad.
+ constant_values (`float` or `Iterable[float]`, *optional*):
+ The value to use for the padding if `mode` is `"constant"`.
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
+ Whether to return a pixel mask.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
+
+ padded_images = [
+ self._pad_image(
+ image,
+ pad_size,
+ constant_values=constant_values,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for image in images
+ ]
+ data = {"pixel_values": padded_images}
+
+ if return_pixel_mask:
+ masks = [
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
+ for image in images
+ ]
+ data["pixel_mask"] = masks
+
+ return BatchFeature(data=data, tensor_type=return_tensors)
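+
+ # Illustrative usage sketch (hypothetical shapes): two images of different sizes are padded to
+ # the common (max_height, max_width) and a pixel mask marks the valid region of each image.
+ # >>> processor = DeformableDetrImageProcessor()
+ # >>> batch = processor.pad([np.zeros((3, 480, 640)), np.zeros((3, 512, 512))], return_tensors="np")
+ # >>> batch["pixel_values"].shape, batch["pixel_mask"].shape
+ # ((2, 3, 512, 640), (2, 512, 640))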
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
+ def preprocess(
+ self,
+ images: ImageInput,
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
+ return_segmentation_masks: bool = None,
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ resample=None, # PILImageResampling
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[Union[int, float]] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: Optional[bool] = None,
+ format: Optional[Union[str, AnnotationFormat]] = None,
+ return_tensors: Optional[Union[TensorType, str]] = None,
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or a batch of images so that it can be used by the model.
+
+ Args:
+ images (`ImageInput`):
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
+ List of annotations associated with the image or batch of images. If annotation is for object
+ detection, the annotations should be a dictionary with the following keys:
+ - "image_id" (`int`): The image id.
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
+ dictionary. An image can have no annotations, in which case the list should be empty.
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
+ - "image_id" (`int`): The image id.
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
+ An image can have no segments, in which case the list should be empty.
+ - "file_name" (`str`): The file name of the image.
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
+ Whether to return segmentation masks.
+ masks_path (`str` or `pathlib.Path`, *optional*):
+ Path to the directory containing the segmentation masks.
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
+ Size of the image after resizing.
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
+ Resampling filter to use when resizing the image.
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
+ Whether to rescale the image.
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
+ Rescale factor to use when rescaling the image.
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
+ Mean to use when normalizing the image.
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
+ Standard deviation to use when normalizing the image.
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
+ Whether to pad the image.
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
+ Format of the annotations.
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
+ Type of tensors to return. If `None`, will return the list of images.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ if "pad_and_return_pixel_mask" in kwargs:
+ logger.warning_once(
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
+ "use `do_pad` instead."
+ )
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
+
+ max_size = None
+ if "max_size" in kwargs:
+ logger.warning_once(
+ "The `max_size` argument is deprecated and will be removed in a future version, use"
+ " `size['longest_edge']` instead."
+ )
+ size = kwargs.pop("max_size")
+
+ do_resize = self.do_resize if do_resize is None else do_resize
+ size = self.size if size is None else size
+ size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
+ resample = self.resample if resample is None else resample
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
+ image_mean = self.image_mean if image_mean is None else image_mean
+ image_std = self.image_std if image_std is None else image_std
+ do_pad = self.do_pad if do_pad is None else do_pad
+ format = self.format if format is None else format
+
+ if do_resize is not None and size is None:
+ raise ValueError("Size and max_size must be specified if do_resize is True.")
+
+ if do_rescale is not None and rescale_factor is None:
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
+
+ if do_normalize is not None and (image_mean is None or image_std is None):
+ raise ValueError("Image mean and std must be specified if do_normalize is True.")
+
+ images = make_list_of_images(images)
+ if annotations is not None and isinstance(annotations, dict):
+ annotations = [annotations]
+
+ if annotations is not None and len(images) != len(annotations):
+ raise ValueError(
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
+ )
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ format = AnnotationFormat(format)
+ if annotations is not None:
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
+
+ if (
+ masks_path is not None
+ and format == AnnotationFormat.COCO_PANOPTIC
+ and not isinstance(masks_path, (pathlib.Path, str))
+ ):
+ raise ValueError(
+ "The path to the directory containing the mask PNG files should be provided as a"
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
+ )
+
+ # All transformations expect numpy arrays
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
+ if annotations is not None:
+ prepared_images = []
+ prepared_annotations = []
+ for image, target in zip(images, annotations):
+ target = self.prepare_annotation(
+ image,
+ target,
+ format,
+ return_segmentation_masks=return_segmentation_masks,
+ masks_path=masks_path,
+ input_data_format=input_data_format,
+ )
+ prepared_images.append(image)
+ prepared_annotations.append(target)
+ images = prepared_images
+ annotations = prepared_annotations
+ del prepared_images, prepared_annotations
+
+ # transformations
+ if do_resize:
+ if annotations is not None:
+ resized_images, resized_annotations = [], []
+ for image, target in zip(images, annotations):
+ orig_size = get_image_size(image, input_data_format)
+ resized_image = self.resize(
+ image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
+ )
+ resized_annotation = self.resize_annotation(
+ target, orig_size, get_image_size(resized_image, input_data_format)
+ )
+ resized_images.append(resized_image)
+ resized_annotations.append(resized_annotation)
+ images = resized_images
+ annotations = resized_annotations
+ del resized_images, resized_annotations
+ else:
+ images = [
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
+
+ if do_normalize:
+ images = [
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
+ ]
+ if annotations is not None:
+ annotations = [
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
+ for annotation, image in zip(annotations, images)
+ ]
+
+ if do_pad:
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
+ data = self.pad(
+ images, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format
+ )
+ else:
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ for image in images
+ ]
+ data = {"pixel_values": images}
+
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
+ if annotations is not None:
+ encoded_inputs["labels"] = [
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
+ ]
+
+ return encoded_inputs
+
+ # POSTPROCESSING METHODS - TODO: add support for other frameworks
+ def post_process(self, outputs, target_sizes):
+ """
+ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
+ top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
+
+ Args:
+ outputs ([`DeformableDetrObjectDetectionOutput`]):
+ Raw outputs of the model.
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
+ Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
+ original image size (before any data augmentation). For visualization, this should be the image size
+ after data augmentation, but before padding.
+ Returns:
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
+ in the batch as predicted by the model.
+ """
+ logger.warning_once(
+ "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
+ " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
+ )
+
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
+
+ if len(out_logits) != len(target_sizes):
+ raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
+ if target_sizes.shape[1] != 2:
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
+
+ prob = out_logits.sigmoid()
+ topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
+ scores = topk_values
+ topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
+ labels = topk_indexes % out_logits.shape[2]
+ boxes = center_to_corners_format(out_bbox)
+ boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
+
+ # and from relative [0, 1] to absolute [0, height] coordinates
+ img_h, img_w = target_sizes.unbind(1)
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
+ boxes = boxes * scale_fct[:, None, :]
+
+ results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
+
+ return results
+
+ def post_process_object_detection(
+ self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, top_k: int = 100
+ ):
+ """
+ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
+ top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
+
+ Args:
+ outputs ([`DetrObjectDetectionOutput`]):
+ Raw outputs of the model.
+ threshold (`float`, *optional*, defaults to 0.5):
+ Score threshold used to keep object detection predictions.
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
+ (height, width) of each image in the batch. If left to None, predictions will not be resized.
+ top_k (`int`, *optional*, defaults to 100):
+ Keep only the top k bounding boxes before applying the score threshold.
+
+ Returns:
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
+ in the batch as predicted by the model.
+ """
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
+
+ if target_sizes is not None:
+ if len(out_logits) != len(target_sizes):
+ raise ValueError(
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
+ )
+
+ prob = out_logits.sigmoid()
+ prob = prob.view(out_logits.shape[0], -1)
+ k_value = min(top_k, prob.size(1))
+ topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
+ scores = topk_values
+ topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
+ labels = topk_indexes % out_logits.shape[2]
+ boxes = center_to_corners_format(out_bbox)
+ boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
+
+ # and from relative [0, 1] to absolute [0, height] coordinates
+ if isinstance(target_sizes, List):
+ img_h = torch.Tensor([i[0] for i in target_sizes])
+ img_w = torch.Tensor([i[1] for i in target_sizes])
+ else:
+ img_h, img_w = target_sizes.unbind(1)
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
+ boxes = boxes * scale_fct[:, None, :]
+
+ results = []
+ for s, l, b in zip(scores, labels, boxes):
+ score = s[s > threshold]
+ label = l[s > threshold]
+ box = b[s > threshold]
+ results.append({"scores": score, "labels": label, "boxes": box})
+
+ return results
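+
+# NOTE (editorial sketch, not part of the upstream file): minimal usage of the post-processing above,
+# assuming `processor` is a DeformableDetrImageProcessor, `image` is a PIL image, and `outputs` comes
+# from a DeformableDetrForObjectDetection forward pass:
+#
+#     target_sizes = torch.tensor([image.size[::-1]])  # (height, width) for each image in the batch
+#     results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)
+#     boxes, scores, labels = results[0]["boxes"], results[0]["scores"], results[0]["labels"]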
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3a822e2764170c24c7098956e81788856385451
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Loading of Deformable DETR's CUDA kernels"""
+import os
+from pathlib import Path
+
+
+def load_cuda_kernels():
+ from torch.utils.cpp_extension import load
+
+ root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
+ src_files = [
+ root / filename
+ for filename in [
+ "vision.cpp",
+ os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
+ os.path.join("cuda", "ms_deform_attn_cuda.cu"),
+ ]
+ ]
+
+ load(
+ "MultiScaleDeformableAttention",
+ src_files,
+ with_cuda=True,
+ extra_include_paths=[str(root)],
+ extra_cflags=["-DWITH_CUDA=1"],
+ extra_cuda_cflags=[
+ "-DCUDA_HAS_FP16=1",
+ "-D__CUDA_NO_HALF_OPERATORS__",
+ "-D__CUDA_NO_HALF_CONVERSIONS__",
+ "-D__CUDA_NO_HALF2_OPERATORS__",
+ ],
+ )
+
+ import MultiScaleDeformableAttention as MSDA
+
+ return MSDA
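+
+# NOTE (editorial sketch, not part of the upstream file): the extension is compiled lazily with
+# torch.utils.cpp_extension.load; the modeling code wraps the call in a try/except so a failed build
+# falls back to the pure-PyTorch attention implementation, roughly:
+#
+#     try:
+#         MultiScaleDeformableAttention = load_cuda_kernels()
+#     except Exception:
+#         MultiScaleDeformableAttention = None  # use the Python implementation instead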
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py
new file mode 100644
index 0000000000000000000000000000000000000000..3767eef0392f6a2111677e8eb994ab10909e5b94
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py
@@ -0,0 +1,2480 @@
+# coding=utf-8
+# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Deformable DETR model."""
+
+
+import copy
+import math
+import warnings
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+from torch import Tensor, nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+
+from ...activations import ACT2FN
+from ...file_utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_scipy_available,
+ is_timm_available,
+ is_torch_cuda_available,
+ is_vision_available,
+ replace_return_docstrings,
+ requires_backends,
+)
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
+from ...modeling_outputs import BaseModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import meshgrid
+from ...utils import is_ninja_available, logging
+from ..auto import AutoBackbone
+from .configuration_deformable_detr import DeformableDetrConfig
+from .load_custom import load_cuda_kernels
+
+
+logger = logging.get_logger(__name__)
+
+# TODO: the kernels should not be compiled at import time; move this so it happens later, e.g. in __init__.
+if is_torch_cuda_available() and is_ninja_available():
+ logger.info("Loading custom CUDA kernels...")
+ try:
+ MultiScaleDeformableAttention = load_cuda_kernels()
+ except Exception as e:
+ logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}")
+ MultiScaleDeformableAttention = None
+else:
+ MultiScaleDeformableAttention = None
+
+if is_vision_available():
+ from transformers.image_transforms import center_to_corners_format
+
+
+class MultiScaleDeformableAttentionFunction(Function):
+ @staticmethod
+ def forward(
+ context,
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step,
+ ):
+ context.im2col_step = im2col_step
+ output = MultiScaleDeformableAttention.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ context.im2col_step,
+ )
+ context.save_for_backward(
+ value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights
+ )
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(context, grad_output):
+ (
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ ) = context.saved_tensors
+ grad_value, grad_sampling_loc, grad_attn_weight = MultiScaleDeformableAttention.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output,
+ context.im2col_step,
+ )
+
+ return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
+
+
+if is_scipy_available():
+ from scipy.optimize import linear_sum_assignment
+
+if is_timm_available():
+ from timm import create_model
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "DeformableDetrConfig"
+_CHECKPOINT_FOR_DOC = "sensetime/deformable-detr"
+
+DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "sensetime/deformable-detr",
+ # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
+]
+
+
+@dataclass
+class DeformableDetrDecoderOutput(ModelOutput):
+ """
+ Base class for outputs of the DeformableDetrDecoder. This class adds two attributes to
+ BaseModelOutputWithCrossAttentions, namely:
+ - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)
+ - a stacked tensor of intermediate reference points.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
+ Stacked intermediate hidden states (output of each layer of the decoder).
+ intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, 4)`):
+ Stacked intermediate reference points (reference points of each layer of the decoder).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+ used to compute the weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ intermediate_hidden_states: torch.FloatTensor = None
+ intermediate_reference_points: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class DeformableDetrModelOutput(ModelOutput):
+ """
+ Base class for outputs of the Deformable DETR encoder-decoder model.
+
+ Args:
+ init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
+ Initial reference points sent through the Transformer decoder.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+ intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
+ Stacked intermediate hidden states (output of each layer of the decoder).
+ intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
+ Stacked intermediate reference points (reference points of each layer of the decoder).
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer
+ plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries,
+ num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`.
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each
+ layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`.
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
+ Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
+ picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
+ foreground and background).
+ enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
+ Logits of predicted bounding boxes coordinates in the first stage.
+ """
+
+ init_reference_points: torch.FloatTensor = None
+ last_hidden_state: torch.FloatTensor = None
+ intermediate_hidden_states: torch.FloatTensor = None
+ intermediate_reference_points: torch.FloatTensor = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ enc_outputs_class: Optional[torch.FloatTensor] = None
+ enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
+
+
+@dataclass
+class DeformableDetrObjectDetectionOutput(ModelOutput):
+ """
+ Output type of [`DeformableDetrForObjectDetection`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
+ Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
+ bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
+ scale-invariant IoU loss.
+ loss_dict (`Dict`, *optional*):
+ A dictionary containing the individual losses. Useful for logging.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
+ Classification logits (including no-object) for all queries.
+ pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
+ Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
+ values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
+ possible padding). You can use [`~DeformableDetrProcessor.post_process_object_detection`] to retrieve the
+ unnormalized bounding boxes.
+ auxiliary_outputs (`list[Dict]`, *optional*):
+ Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
+ and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
+ `pred_boxes`) for each decoder layer.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer
+ plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries,
+ num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`.
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each
+ layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4,
+ 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average
+ in the self-attention heads.
+ intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
+ Stacked intermediate hidden states (output of each layer of the decoder).
+ intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
+ Stacked intermediate reference points (reference points of each layer of the decoder).
+ init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
+ Initial reference points sent through the Transformer decoder.
+ enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
+ Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
+ picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
+ foreground and background).
+ enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
+ Logits of predicted bounding boxes coordinates in the first stage.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ loss_dict: Optional[Dict] = None
+ logits: torch.FloatTensor = None
+ pred_boxes: torch.FloatTensor = None
+ auxiliary_outputs: Optional[List[Dict]] = None
+ init_reference_points: Optional[torch.FloatTensor] = None
+ last_hidden_state: Optional[torch.FloatTensor] = None
+ intermediate_hidden_states: Optional[torch.FloatTensor] = None
+ intermediate_reference_points: Optional[torch.FloatTensor] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ enc_outputs_class: Optional[torch.FloatTensor] = None
+ enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
+
+
+def _get_clones(module, N):
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+
+def inverse_sigmoid(x, eps=1e-5):
+ x = x.clamp(min=0, max=1)
+ x1 = x.clamp(min=eps)
+ x2 = (1 - x).clamp(min=eps)
+ return torch.log(x1 / x2)
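+
+# NOTE (editorial, not part of the upstream file): inverse_sigmoid is the clamped logit function; the
+# decoder uses it to map normalized reference points back to logit space before adding box-refinement
+# deltas. A quick sanity check, assuming `x` has values strictly inside (0, 1):
+#
+#     x = torch.tensor([0.25, 0.5, 0.75])
+#     torch.allclose(torch.sigmoid(inverse_sigmoid(x)), x, atol=1e-4)  # True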
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->DeformableDetr
+class DeformableDetrFrozenBatchNorm2d(nn.Module):
+ """
+ BatchNorm2d where the batch statistics and the affine parameters are fixed.
+
+ Copy-paste from torchvision.misc.ops with an added eps before rsqrt, without which models other than
+ torchvision.models.resnet[18,34,50,101] produce NaNs.
+ """
+
+ def __init__(self, n):
+ super().__init__()
+ self.register_buffer("weight", torch.ones(n))
+ self.register_buffer("bias", torch.zeros(n))
+ self.register_buffer("running_mean", torch.zeros(n))
+ self.register_buffer("running_var", torch.ones(n))
+
+ def _load_from_state_dict(
+ self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+ ):
+ num_batches_tracked_key = prefix + "num_batches_tracked"
+ if num_batches_tracked_key in state_dict:
+ del state_dict[num_batches_tracked_key]
+
+ super()._load_from_state_dict(
+ state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+ )
+
+ def forward(self, x):
+ # move reshapes to the beginning
+ # to make it user-friendly
+ weight = self.weight.reshape(1, -1, 1, 1)
+ bias = self.bias.reshape(1, -1, 1, 1)
+ running_var = self.running_var.reshape(1, -1, 1, 1)
+ running_mean = self.running_mean.reshape(1, -1, 1, 1)
+ epsilon = 1e-5
+ scale = weight * (running_var + epsilon).rsqrt()
+ bias = bias - running_mean * scale
+ return x * scale + bias
+
+
+# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DeformableDetr
+def replace_batch_norm(model):
+ r"""
+ Recursively replace all `torch.nn.BatchNorm2d` with `DeformableDetrFrozenBatchNorm2d`.
+
+ Args:
+ model (torch.nn.Module):
+ input model
+ """
+ for name, module in model.named_children():
+ if isinstance(module, nn.BatchNorm2d):
+ new_module = DeformableDetrFrozenBatchNorm2d(module.num_features)
+
+ if not module.weight.device == torch.device("meta"):
+ new_module.weight.data.copy_(module.weight)
+ new_module.bias.data.copy_(module.bias)
+ new_module.running_mean.data.copy_(module.running_mean)
+ new_module.running_var.data.copy_(module.running_var)
+
+ model._modules[name] = new_module
+
+ if len(list(module.children())) > 0:
+ replace_batch_norm(module)
+
+
+class DeformableDetrConvEncoder(nn.Module):
+ """
+ Convolutional backbone, using either the AutoBackbone API or one from the timm library.
+
+ nn.BatchNorm2d layers are replaced by DeformableDetrFrozenBatchNorm2d as defined above.
+
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.config = config
+
+ if config.use_timm_backbone:
+ requires_backends(self, ["timm"])
+ kwargs = {}
+ if config.dilation:
+ kwargs["output_stride"] = 16
+ backbone = create_model(
+ config.backbone,
+ pretrained=config.use_pretrained_backbone,
+ features_only=True,
+ out_indices=(2, 3, 4) if config.num_feature_levels > 1 else (4,),
+ in_chans=config.num_channels,
+ **kwargs,
+ )
+ else:
+ backbone = AutoBackbone.from_config(config.backbone_config)
+
+ # replace batch norm by frozen batch norm
+ with torch.no_grad():
+ replace_batch_norm(backbone)
+ self.model = backbone
+ self.intermediate_channel_sizes = (
+ self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
+ )
+
+ backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type
+ if "resnet" in backbone_model_type:
+ for name, parameter in self.model.named_parameters():
+ if config.use_timm_backbone:
+ if "layer2" not in name and "layer3" not in name and "layer4" not in name:
+ parameter.requires_grad_(False)
+ else:
+ if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name:
+ parameter.requires_grad_(False)
+
+ # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->DeformableDetr
+ def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
+ # send pixel_values through the model to get list of feature maps
+ features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
+
+ out = []
+ for feature_map in features:
+ # downsample pixel_mask to match shape of corresponding feature_map
+ mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
+ out.append((feature_map, mask))
+ return out
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->DeformableDetr
+class DeformableDetrConvModel(nn.Module):
+ """
+ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
+ """
+
+ def __init__(self, conv_encoder, position_embedding):
+ super().__init__()
+ self.conv_encoder = conv_encoder
+ self.position_embedding = position_embedding
+
+ def forward(self, pixel_values, pixel_mask):
+ # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples
+ out = self.conv_encoder(pixel_values, pixel_mask)
+ pos = []
+ for feature_map, mask in out:
+ # position encoding
+ pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
+
+ return out, pos
+
+
+class DeformableDetrSinePositionEmbedding(nn.Module):
+ """
+ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
+ need paper, generalized to work on images.
+ """
+
+ def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
+ super().__init__()
+ self.embedding_dim = embedding_dim
+ self.temperature = temperature
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+
+ def forward(self, pixel_values, pixel_mask):
+ if pixel_mask is None:
+ raise ValueError("No pixel mask provided")
+ y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
+ x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
+ if self.normalize:
+ eps = 1e-6
+ y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
+ x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
+
+ dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device)
+ dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim)
+
+ pos_x = x_embed[:, :, :, None] / dim_t
+ pos_y = y_embed[:, :, :, None] / dim_t
+ pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+ return pos
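+
+# NOTE (editorial, not part of the upstream file): for a pixel_mask of shape (batch_size, height, width)
+# the sine embedding above returns a tensor of shape (batch_size, 2 * embedding_dim, height, width), with
+# the sine/cosine terms for the y coordinate stacked before those for the x coordinate.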
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding
+class DeformableDetrLearnedPositionEmbedding(nn.Module):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, embedding_dim=256):
+ super().__init__()
+ self.row_embeddings = nn.Embedding(50, embedding_dim)
+ self.column_embeddings = nn.Embedding(50, embedding_dim)
+
+ def forward(self, pixel_values, pixel_mask=None):
+ height, width = pixel_values.shape[-2:]
+ width_values = torch.arange(width, device=pixel_values.device)
+ height_values = torch.arange(height, device=pixel_values.device)
+ x_emb = self.column_embeddings(width_values)
+ y_emb = self.row_embeddings(height_values)
+ pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
+ pos = pos.permute(2, 0, 1)
+ pos = pos.unsqueeze(0)
+ pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
+ return pos
+
+
+# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->DeformableDetr
+def build_position_encoding(config):
+ n_steps = config.d_model // 2
+ if config.position_embedding_type == "sine":
+ # TODO find a better way of exposing other arguments
+ position_embedding = DeformableDetrSinePositionEmbedding(n_steps, normalize=True)
+ elif config.position_embedding_type == "learned":
+ position_embedding = DeformableDetrLearnedPositionEmbedding(n_steps)
+ else:
+ raise ValueError(f"Not supported {config.position_embedding_type}")
+
+ return position_embedding
+
+
+def multi_scale_deformable_attention(
+ value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor
+) -> Tensor:
+ batch_size, _, num_heads, hidden_dim = value.shape
+ _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
+ value_list = value.split([height.item() * width.item() for height, width in value_spatial_shapes], dim=1)
+ sampling_grids = 2 * sampling_locations - 1
+ sampling_value_list = []
+ for level_id, (height, width) in enumerate(value_spatial_shapes):
+ # batch_size, height*width, num_heads, hidden_dim
+ # -> batch_size, height*width, num_heads*hidden_dim
+ # -> batch_size, num_heads*hidden_dim, height*width
+ # -> batch_size*num_heads, hidden_dim, height, width
+ value_l_ = (
+ value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width)
+ )
+ # batch_size, num_queries, num_heads, num_points, 2
+ # -> batch_size, num_heads, num_queries, num_points, 2
+ # -> batch_size*num_heads, num_queries, num_points, 2
+ sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1)
+ # batch_size*num_heads, hidden_dim, num_queries, num_points
+ sampling_value_l_ = nn.functional.grid_sample(
+ value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
+ )
+ sampling_value_list.append(sampling_value_l_)
+ # (batch_size, num_queries, num_heads, num_levels, num_points)
+ # -> (batch_size, num_heads, num_queries, num_levels, num_points)
+ # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points)
+ attention_weights = attention_weights.transpose(1, 2).reshape(
+ batch_size * num_heads, 1, num_queries, num_levels * num_points
+ )
+ output = (
+ (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
+ .sum(-1)
+ .view(batch_size, num_heads * hidden_dim, num_queries)
+ )
+ return output.transpose(1, 2).contiguous()
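+
+# NOTE (editorial sketch, not part of the upstream file): a shape check for the pure-PyTorch fallback
+# above, assuming 2 feature levels of spatial shape (8, 8) and (4, 4), 2 heads, head dim 4, 3 queries
+# and 2 sampling points per level:
+#
+#     value = torch.rand(1, 8 * 8 + 4 * 4, 2, 4)
+#     shapes = torch.tensor([[8, 8], [4, 4]])
+#     locations = torch.rand(1, 3, 2, 2, 2, 2)         # in [0, 1], relative to each feature map
+#     weights = torch.rand(1, 3, 2, 2, 2).softmax(-1)  # normalized per level here for simplicity
+#     out = multi_scale_deformable_attention(value, shapes, locations, weights)
+#     # out.shape == (1, 3, 8), i.e. (batch_size, num_queries, num_heads * head_dim)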
+
+
+class DeformableDetrMultiscaleDeformableAttention(nn.Module):
+ """
+ Multiscale deformable attention as proposed in Deformable DETR.
+ """
+
+ def __init__(self, config: DeformableDetrConfig, num_heads: int, n_points: int):
+ super().__init__()
+ if config.d_model % num_heads != 0:
+ raise ValueError(
+ f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}"
+ )
+ dim_per_head = config.d_model // num_heads
+ # check if dim_per_head is power of 2
+ if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
+ warnings.warn(
+ "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the"
+ " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
+ " implementation."
+ )
+
+ self.im2col_step = 64
+
+ self.d_model = config.d_model
+ self.n_levels = config.num_feature_levels
+ self.n_heads = num_heads
+ self.n_points = n_points
+
+ self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
+ self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
+ self.value_proj = nn.Linear(config.d_model, config.d_model)
+ self.output_proj = nn.Linear(config.d_model, config.d_model)
+
+ self.disable_custom_kernels = config.disable_custom_kernels
+
+ self._reset_parameters()
+
+ def _reset_parameters(self):
+ nn.init.constant_(self.sampling_offsets.weight.data, 0.0)
+ thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
+ grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
+ grid_init = (
+ (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
+ .view(self.n_heads, 1, 1, 2)
+ .repeat(1, self.n_levels, self.n_points, 1)
+ )
+ for i in range(self.n_points):
+ grid_init[:, :, i, :] *= i + 1
+ with torch.no_grad():
+ self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
+ nn.init.constant_(self.attention_weights.weight.data, 0.0)
+ nn.init.constant_(self.attention_weights.bias.data, 0.0)
+ nn.init.xavier_uniform_(self.value_proj.weight.data)
+ nn.init.constant_(self.value_proj.bias.data, 0.0)
+ nn.init.xavier_uniform_(self.output_proj.weight.data)
+ nn.init.constant_(self.output_proj.bias.data, 0.0)
+
+ def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
+ return tensor if position_embeddings is None else tensor + position_embeddings
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ position_embeddings: Optional[torch.Tensor] = None,
+ reference_points=None,
+ spatial_shapes=None,
+ level_start_index=None,
+ output_attentions: bool = False,
+ ):
+ # add position embeddings to the hidden states before projecting to queries and keys
+ if position_embeddings is not None:
+ hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
+
+ batch_size, num_queries, _ = hidden_states.shape
+ batch_size, sequence_length, _ = encoder_hidden_states.shape
+ if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
+ raise ValueError(
+ "Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
+ )
+
+ value = self.value_proj(encoder_hidden_states)
+ if attention_mask is not None:
+ # we invert the attention_mask
+ value = value.masked_fill(~attention_mask[..., None], float(0))
+ value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
+ sampling_offsets = self.sampling_offsets(hidden_states).view(
+ batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
+ )
+ attention_weights = self.attention_weights(hidden_states).view(
+ batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
+ )
+ attention_weights = F.softmax(attention_weights, -1).view(
+ batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
+ )
+ # batch_size, num_queries, n_heads, n_levels, n_points, 2
+ if reference_points.shape[-1] == 2:
+ offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
+ sampling_locations = (
+ reference_points[:, :, None, :, None, :]
+ + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
+ )
+ elif reference_points.shape[-1] == 4:
+ sampling_locations = (
+ reference_points[:, :, None, :, None, :2]
+ + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
+ )
+ else:
+ raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
+
+ if self.disable_custom_kernels:
+ # PyTorch implementation
+ output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
+ else:
+ try:
+ # custom kernel
+ output = MultiScaleDeformableAttentionFunction.apply(
+ value,
+ spatial_shapes,
+ level_start_index,
+ sampling_locations,
+ attention_weights,
+ self.im2col_step,
+ )
+ except Exception:
+ # PyTorch implementation
+ output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
+ output = self.output_proj(output)
+
+ return output, attention_weights
+
+
+class DeformableDetrMultiheadAttention(nn.Module):
+ """
+ Multi-headed attention from 'Attention Is All You Need' paper.
+
+ Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper).
+ """
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ bias: bool = True,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ if self.head_dim * num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
+ return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
+ return tensor if position_embeddings is None else tensor + position_embeddings
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_embeddings: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ batch_size, target_len, embed_dim = hidden_states.size()
+ # add position embeddings to the hidden states before projecting to queries and keys
+ if position_embeddings is not None:
+ hidden_states_original = hidden_states
+ hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
+
+ # get queries, keys and values
+ query_states = self.q_proj(hidden_states) * self.scaling
+ key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
+ value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
+
+ proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ source_len = key_states.size(1)
+
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
+ raise ValueError(
+ f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (batch_size, 1, target_len, source_len):
+ raise ValueError(
+ f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is"
+ f" {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
+ attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
+ attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+
+class DeformableDetrEncoderLayer(nn.Module):
+ def __init__(self, config: DeformableDetrConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = DeformableDetrMultiscaleDeformableAttention(
+ config, num_heads=config.encoder_attention_heads, n_points=config.encoder_n_points
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ position_embeddings: torch.Tensor = None,
+ reference_points=None,
+ spatial_shapes=None,
+ level_start_index=None,
+ output_attentions: bool = False,
+ ):
+ """
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Input to the layer.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Attention mask.
+ position_embeddings (`torch.FloatTensor`, *optional*):
+ Position embeddings, to be added to `hidden_states`.
+ reference_points (`torch.FloatTensor`, *optional*):
+ Reference points.
+ spatial_shapes (`torch.LongTensor`, *optional*):
+ Spatial shapes of the backbone feature maps.
+ level_start_index (`torch.LongTensor`, *optional*):
+ Level start index.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps.
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ position_embeddings=position_embeddings,
+ reference_points=reference_points,
+ spatial_shapes=spatial_shapes,
+ level_start_index=level_start_index,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if self.training:
+ if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class DeformableDetrDecoderLayer(nn.Module):
+ def __init__(self, config: DeformableDetrConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ # self-attention
+ self.self_attn = DeformableDetrMultiheadAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ # cross-attention
+ self.encoder_attn = DeformableDetrMultiscaleDeformableAttention(
+ config,
+ num_heads=config.decoder_attention_heads,
+ n_points=config.decoder_n_points,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ # feedforward neural networks
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_embeddings: Optional[torch.Tensor] = None,
+ reference_points=None,
+ spatial_shapes=None,
+ level_start_index=None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ):
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ Input to the layer of shape `(seq_len, batch, embed_dim)`.
+ position_embeddings (`torch.FloatTensor`, *optional*):
+ Position embeddings that are added to the queries and keys in the self-attention layer.
+ reference_points (`torch.FloatTensor`, *optional*):
+ Reference points.
+ spatial_shapes (`torch.LongTensor`, *optional*):
+ Spatial shapes.
+ level_start_index (`torch.LongTensor`, *optional*):
+ Level start index.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
+ values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ position_embeddings=position_embeddings,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ second_residual = hidden_states
+
+ # Cross-Attention
+ cross_attn_weights = None
+ hidden_states, cross_attn_weights = self.encoder_attn(
+ hidden_states=hidden_states,
+ attention_mask=encoder_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ position_embeddings=position_embeddings,
+ reference_points=reference_points,
+ spatial_shapes=spatial_shapes,
+ level_start_index=level_start_index,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = second_residual + hidden_states
+
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ return outputs
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead
+class DeformableDetrClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
+ super().__init__()
+ self.dense = nn.Linear(input_dim, inner_dim)
+ self.dropout = nn.Dropout(p=pooler_dropout)
+ self.out_proj = nn.Linear(inner_dim, num_classes)
+
+ def forward(self, hidden_states: torch.Tensor):
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.out_proj(hidden_states)
+ return hidden_states
+
+
+class DeformableDetrPreTrainedModel(PreTrainedModel):
+ config_class = DeformableDetrConfig
+ base_model_prefix = "model"
+ main_input_name = "pixel_values"
+ _no_split_modules = [r"DeformableDetrConvEncoder", r"DeformableDetrEncoderLayer", r"DeformableDetrDecoderLayer"]
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+
+ if isinstance(module, DeformableDetrLearnedPositionEmbedding):
+ nn.init.uniform_(module.row_embeddings.weight)
+ nn.init.uniform_(module.column_embeddings.weight)
+ elif isinstance(module, DeformableDetrMultiscaleDeformableAttention):
+ module._reset_parameters()
+ elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ if hasattr(module, "reference_points") and not self.config.two_stage:
+ nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
+ nn.init.constant_(module.reference_points.bias.data, 0.0)
+ if hasattr(module, "level_embed"):
+ nn.init.normal_(module.level_embed)
+
+
+DEFORMABLE_DETR_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`DeformableDetrConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DEFORMABLE_DETR_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it.
+
+ Pixel values can be obtained using [`AutoImageProcessor`]. See [`DeformableDetrImageProcessor.__call__`]
+ for details.
+
+ pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
+ Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
+
+ - 1 for pixels that are real (i.e. **not masked**),
+ - 0 for pixels that are padding (i.e. **masked**).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
+ Not used by default. Can be used to mask object queries.
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
+ can choose to directly pass a flattened representation of an image.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
+ Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
+ embedded representation.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class DeformableDetrEncoder(DeformableDetrPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
+ [`DeformableDetrEncoderLayer`].
+
+ The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers.
+
+ Args:
+ config: DeformableDetrConfig
+ """
+
+ def __init__(self, config: DeformableDetrConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layers = nn.ModuleList([DeformableDetrEncoderLayer(config) for _ in range(config.encoder_layers)])
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @staticmethod
+ def get_reference_points(spatial_shapes, valid_ratios, device):
+ """
+ Get reference points for each feature map. Used in the encoder.
+
+ Args:
+ spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
+ Spatial shapes of each feature map.
+ valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
+ Valid ratios of each feature map.
+ device (`torch.device`):
+ Device on which to create the tensors.
+ Returns:
+ `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
+ """
+ reference_points_list = []
+ for level, (height, width) in enumerate(spatial_shapes):
+ ref_y, ref_x = meshgrid(
+ torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device),
+ torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device),
+ indexing="ij",
+ )
+ # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36
+ ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)
+ ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)
+ ref = torch.stack((ref_x, ref_y), -1)
+ reference_points_list.append(ref)
+ reference_points = torch.cat(reference_points_list, 1)
+ reference_points = reference_points[:, :, None] * valid_ratios[:, None]
+ return reference_points
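+
+ # A minimal sketch (hypothetical toy shapes, not part of the library) of what `get_reference_points`
+ # produces: a single 2x3 feature map with no padding yields one normalized (x, y) center per flattened
+ # position and per feature level.
+ #
+ # >>> spatial_shapes = torch.tensor([[2, 3]])  # one feature level of height 2 and width 3
+ # >>> valid_ratios = torch.ones(1, 1, 2)  # batch of 1, no padded area
+ # >>> DeformableDetrEncoder.get_reference_points(spatial_shapes, valid_ratios, device="cpu").shape
+ # torch.Size([1, 6, 1, 2])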
+
+ def forward(
+ self,
+ inputs_embeds=None,
+ attention_mask=None,
+ position_embeddings=None,
+ spatial_shapes=None,
+ level_start_index=None,
+ valid_ratios=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
+ - 1 for pixel features that are real (i.e. **not masked**),
+ - 0 for pixel features that are padding (i.e. **masked**).
+ [What are attention masks?](../glossary#attention-mask)
+ position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Position embeddings that are added to the queries and keys in each self-attention layer.
+ spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
+ Spatial shapes of each feature map.
+ level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
+ Starting index of each feature map.
+ valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
+ Ratio of valid area in each feature level.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ hidden_states = inputs_embeds
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ for i, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ position_embeddings=position_embeddings,
+ reference_points=reference_points,
+ spatial_shapes=spatial_shapes,
+ level_start_index=level_start_index,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class DeformableDetrDecoder(DeformableDetrPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DeformableDetrDecoderLayer`].
+
+ The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
+
+ Some tweaks for Deformable DETR:
+
+ - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass.
+ - it also returns a stack of intermediate outputs and reference points from all decoding layers.
+
+ Args:
+ config: DeformableDetrConfig
+ """
+
+ def __init__(self, config: DeformableDetrConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layers = nn.ModuleList([DeformableDetrDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.gradient_checkpointing = False
+
+ # hack implementation for iterative bounding box refinement and two-stage Deformable DETR
+ self.bbox_embed = None
+ self.class_embed = None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ inputs_embeds=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ position_embeddings=None,
+ reference_points=None,
+ spatial_shapes=None,
+ level_start_index=None,
+ valid_ratios=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
+ The query embeddings that are passed into the decoder.
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
+ in `[0, 1]`:
+ - 1 for pixels that are real (i.e. **not masked**),
+ - 0 for pixels that are padding (i.e. **masked**).
+ position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
+ Position embeddings that are added to the queries and keys in each self-attention layer.
+ reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` if `as_two_stage` else `(batch_size, num_queries, 2)`, *optional*):
+ Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area.
+ spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`):
+ Spatial shapes of the feature maps.
+ level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*):
+ Indexes for the start of each feature level. In range `[0, sequence_length]`.
+ valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*):
+ Ratio of valid area in each feature level.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if inputs_embeds is not None:
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ intermediate = ()
+ intermediate_reference_points = ()
+
+ for idx, decoder_layer in enumerate(self.layers):
+ if reference_points.shape[-1] == 4:
+ reference_points_input = (
+ reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None]
+ )
+ else:
+ if reference_points.shape[-1] != 2:
+ raise ValueError("Reference points' last dimension must be of size 2")
+ reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ None,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ position_embeddings=position_embeddings,
+ encoder_hidden_states=encoder_hidden_states,
+ reference_points=reference_points_input,
+ spatial_shapes=spatial_shapes,
+ level_start_index=level_start_index,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ # hack implementation for iterative bounding box refinement
+ if self.bbox_embed is not None:
+ tmp = self.bbox_embed[idx](hidden_states)
+ if reference_points.shape[-1] == 4:
+ new_reference_points = tmp + inverse_sigmoid(reference_points)
+ new_reference_points = new_reference_points.sigmoid()
+ else:
+ if reference_points.shape[-1] != 2:
+ raise ValueError(
+ f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}"
+ )
+ new_reference_points = tmp
+ new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
+ new_reference_points = new_reference_points.sigmoid()
+ reference_points = new_reference_points.detach()
+
+ intermediate += (hidden_states,)
+ intermediate_reference_points += (reference_points,)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # Keep batch_size as first dimension
+ intermediate = torch.stack(intermediate, dim=1)
+ intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ intermediate,
+ intermediate_reference_points,
+ all_hidden_states,
+ all_self_attns,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return DeformableDetrDecoderOutput(
+ last_hidden_state=hidden_states,
+ intermediate_hidden_states=intermediate,
+ intermediate_reference_points=intermediate_reference_points,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The bare Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw
+ hidden-states without any specific head on top.
+ """,
+ DEFORMABLE_DETR_START_DOCSTRING,
+)
+class DeformableDetrModel(DeformableDetrPreTrainedModel):
+ def __init__(self, config: DeformableDetrConfig):
+ super().__init__(config)
+
+ # Create backbone + positional encoding
+ backbone = DeformableDetrConvEncoder(config)
+ position_embeddings = build_position_encoding(config)
+ self.backbone = DeformableDetrConvModel(backbone, position_embeddings)
+
+ # Create input projection layers
+ if config.num_feature_levels > 1:
+ num_backbone_outs = len(backbone.intermediate_channel_sizes)
+ input_proj_list = []
+ for _ in range(num_backbone_outs):
+ in_channels = backbone.intermediate_channel_sizes[_]
+ input_proj_list.append(
+ nn.Sequential(
+ nn.Conv2d(in_channels, config.d_model, kernel_size=1),
+ nn.GroupNorm(32, config.d_model),
+ )
+ )
+ for _ in range(config.num_feature_levels - num_backbone_outs):
+ input_proj_list.append(
+ nn.Sequential(
+ nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1),
+ nn.GroupNorm(32, config.d_model),
+ )
+ )
+ in_channels = config.d_model
+ self.input_proj = nn.ModuleList(input_proj_list)
+ else:
+ self.input_proj = nn.ModuleList(
+ [
+ nn.Sequential(
+ nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1),
+ nn.GroupNorm(32, config.d_model),
+ )
+ ]
+ )
+
+ if not config.two_stage:
+ self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2)
+
+ self.encoder = DeformableDetrEncoder(config)
+ self.decoder = DeformableDetrDecoder(config)
+
+ self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))
+
+ if config.two_stage:
+ self.enc_output = nn.Linear(config.d_model, config.d_model)
+ self.enc_output_norm = nn.LayerNorm(config.d_model)
+ self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2)
+ self.pos_trans_norm = nn.LayerNorm(config.d_model * 2)
+ else:
+ self.reference_points = nn.Linear(config.d_model, 2)
+
+ self.post_init()
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def freeze_backbone(self):
+ for name, param in self.backbone.conv_encoder.model.named_parameters():
+ param.requires_grad_(False)
+
+ def unfreeze_backbone(self):
+ for name, param in self.backbone.conv_encoder.model.named_parameters():
+ param.requires_grad_(True)
+
+ def get_valid_ratio(self, mask):
+ """Get the valid ratio of all feature maps."""
+
+ _, height, width = mask.shape
+ valid_height = torch.sum(mask[:, :, 0], 1)
+ valid_width = torch.sum(mask[:, 0, :], 1)
+ valid_ratio_height = valid_height.float() / height
+ valid_ratio_width = valid_width.float() / width
+ valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
+ return valid_ratio
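+
+ # A small worked example (hypothetical mask, assuming `model` is an instantiated `DeformableDetrModel`):
+ # for a 4x6 pixel mask where only the top-left 2x3 region is real, the valid ratio is
+ # [valid_width / width, valid_height / height] = [3 / 6, 2 / 4] = [0.5, 0.5].
+ #
+ # >>> mask = torch.zeros(1, 4, 6, dtype=torch.long)
+ # >>> mask[:, :2, :3] = 1
+ # >>> model.get_valid_ratio(mask)
+ # tensor([[0.5000, 0.5000]])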
+
+ def get_proposal_pos_embed(self, proposals):
+ """Get the position embedding of the proposals."""
+
+ num_pos_feats = self.config.d_model // 2
+ temperature = 10000
+ scale = 2 * math.pi
+
+ dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)
+ dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
+ # batch_size, num_queries, 4
+ proposals = proposals.sigmoid() * scale
+ # batch_size, num_queries, 4, 128
+ pos = proposals[:, :, :, None] / dim_t
+ # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512
+ pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
+ return pos
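+
+ # Shape sketch (assuming the default `d_model=256`): `get_proposal_pos_embed` maps proposals of shape
+ # (batch_size, num_queries, 4) to sinusoidal embeddings of shape (batch_size, num_queries, 512), i.e.
+ # 128 sine/cosine features per box coordinate, which `forward` later splits into query_embed and target.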
+
+ def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
+ """Generate the encoder output proposals from encoded enc_output.
+
+ Args:
+ enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.
+ padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.
+ spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps.
+
+ Returns:
+ `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
+ - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to
+ directly predict a bounding box (without the need of a decoder).
+ - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
+ sigmoid.
+ """
+ batch_size = enc_output.shape[0]
+ proposals = []
+ _cur = 0
+ for level, (height, width) in enumerate(spatial_shapes):
+ mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1)
+ valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
+ valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
+
+ grid_y, grid_x = meshgrid(
+ torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device),
+ torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device),
+ indexing="ij",
+ )
+ grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
+
+ scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
+ grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
+ width_height = torch.ones_like(grid) * 0.05 * (2.0**level)
+ proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
+ proposals.append(proposal)
+ _cur += height * width
+ output_proposals = torch.cat(proposals, 1)
+ output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
+ output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid
+ output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf"))
+ output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
+
+ # assign each pixel as an object query
+ object_query = enc_output
+ object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
+ object_query = object_query.masked_fill(~output_proposals_valid, float(0))
+ object_query = self.enc_output_norm(self.enc_output(object_query))
+ return object_query, output_proposals
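+
+ # Reading guide (restating the code above, not additional behavior): every flattened encoder position at
+ # level `l` becomes a proposal box centered at its normalized grid location with width = height =
+ # 0.05 * 2**l; proposals whose coordinates fall outside (0.01, 0.99) or that lie in padded area are
+ # masked to +inf after the inverse sigmoid, and the corresponding object queries are zeroed out.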
+
+ @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=DeformableDetrModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ pixel_mask: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.FloatTensor] = None,
+ encoder_outputs: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], DeformableDetrModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, DeformableDetrModel
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
+ >>> model = DeformableDetrModel.from_pretrained("SenseTime/deformable-detr")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 300, 256]
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ batch_size, num_channels, height, width = pixel_values.shape
+ device = pixel_values.device
+
+ if pixel_mask is None:
+ pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device)
+
+ # Extract multi-scale feature maps, each later projected to `config.d_model` channels (cf Figure 4 in paper)
+ # First, send pixel_values + pixel_mask through the backbone to obtain the features,
+ # which is a list of (feature map, mask) tuples
+ features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)
+
+ # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
+ sources = []
+ masks = []
+ for level, (source, mask) in enumerate(features):
+ sources.append(self.input_proj[level](source))
+ masks.append(mask)
+ if mask is None:
+ raise ValueError("No attention mask was provided")
+
+ # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage
+ if self.config.num_feature_levels > len(sources):
+ _len_sources = len(sources)
+ for level in range(_len_sources, self.config.num_feature_levels):
+ if level == _len_sources:
+ source = self.input_proj[level](features[-1][0])
+ else:
+ source = self.input_proj[level](sources[-1])
+ mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0]
+ pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)
+ sources.append(source)
+ masks.append(mask)
+ position_embeddings_list.append(pos_l)
+
+ # Create queries
+ query_embeds = None
+ if not self.config.two_stage:
+ query_embeds = self.query_position_embeddings.weight
+
+ # Prepare encoder inputs (by flattening)
+ source_flatten = []
+ mask_flatten = []
+ lvl_pos_embed_flatten = []
+ spatial_shapes = []
+ for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)):
+ batch_size, num_channels, height, width = source.shape
+ spatial_shape = (height, width)
+ spatial_shapes.append(spatial_shape)
+ source = source.flatten(2).transpose(1, 2)
+ mask = mask.flatten(1)
+ pos_embed = pos_embed.flatten(2).transpose(1, 2)
+ lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
+ lvl_pos_embed_flatten.append(lvl_pos_embed)
+ source_flatten.append(source)
+ mask_flatten.append(mask)
+ source_flatten = torch.cat(source_flatten, 1)
+ mask_flatten = torch.cat(mask_flatten, 1)
+ lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
+ spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
+ level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
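+ # E.g. (hypothetical sizes): spatial_shapes [[100, 150], [50, 75]] gives level_start_index [0, 15000],
+ # i.e. level 1 starts right after the 100 * 150 flattened positions of level 0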
+ valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
+ valid_ratios = valid_ratios.float()
+
+ # Fourth, send source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through the encoder
+ # Also provide spatial_shapes, level_start_index and valid_ratios
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ inputs_embeds=source_flatten,
+ attention_mask=mask_flatten,
+ position_embeddings=lvl_pos_embed_flatten,
+ spatial_shapes=spatial_shapes,
+ level_start_index=level_start_index,
+ valid_ratios=valid_ratios,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # Fifth, prepare decoder inputs
+ batch_size, _, num_channels = encoder_outputs[0].shape
+ enc_outputs_class = None
+ enc_outputs_coord_logits = None
+ if self.config.two_stage:
+ object_query_embedding, output_proposals = self.gen_encoder_output_proposals(
+ encoder_outputs[0], ~mask_flatten, spatial_shapes
+ )
+
+ # hack implementation for two-stage Deformable DETR
+ # apply a detection head to each pixel (A.4 in paper)
+ # linear projection for bounding box binary classification (i.e. foreground and background)
+ enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding)
+ # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch)
+ delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding)
+ enc_outputs_coord_logits = delta_bbox + output_proposals
+
+ # only keep top scoring `config.two_stage_num_proposals` proposals
+ topk = self.config.two_stage_num_proposals
+ topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
+ topk_coords_logits = torch.gather(
+ enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
+ )
+
+ topk_coords_logits = topk_coords_logits.detach()
+ reference_points = topk_coords_logits.sigmoid()
+ init_reference_points = reference_points
+ pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits)))
+ query_embed, target = torch.split(pos_trans_out, num_channels, dim=2)
+ else:
+ query_embed, target = torch.split(query_embeds, num_channels, dim=1)
+ query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1)
+ target = target.unsqueeze(0).expand(batch_size, -1, -1)
+ reference_points = self.reference_points(query_embed).sigmoid()
+ init_reference_points = reference_points
+
+ decoder_outputs = self.decoder(
+ inputs_embeds=target,
+ position_embeddings=query_embed,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=mask_flatten,
+ reference_points=reference_points,
+ spatial_shapes=spatial_shapes,
+ level_start_index=level_start_index,
+ valid_ratios=valid_ratios,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None)
+ tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs
+
+ return tuple_outputs
+
+ return DeformableDetrModelOutput(
+ init_reference_points=init_reference_points,
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
+ intermediate_reference_points=decoder_outputs.intermediate_reference_points,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ enc_outputs_class=enc_outputs_class,
+ enc_outputs_coord_logits=enc_outputs_coord_logits,
+ )
+
+
+@add_start_docstrings(
+ """
+ Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on
+ top, for tasks such as COCO detection.
+ """,
+ DEFORMABLE_DETR_START_DOCSTRING,
+)
+class DeformableDetrForObjectDetection(DeformableDetrPreTrainedModel):
+ # When using clones, all layers > 0 will be clones, but layer 0 *is* required
+ _tied_weights_keys = [r"bbox_embed\.[1-9]\d*", r"class_embed\.[1-9]\d*"]
+ # We can't initialize the model on meta device as some weights are modified during the initialization
+ _no_split_modules = None
+
+ def __init__(self, config: DeformableDetrConfig):
+ super().__init__(config)
+
+ # Deformable DETR encoder-decoder model
+ self.model = DeformableDetrModel(config)
+
+ # Detection heads on top
+ self.class_embed = nn.Linear(config.d_model, config.num_labels)
+ self.bbox_embed = DeformableDetrMLPPredictionHead(
+ input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
+ )
+
+ prior_prob = 0.01
+ bias_value = -math.log((1 - prior_prob) / prior_prob)
+ self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value
+ nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
+ nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
+
+ # if two-stage, the last class_embed and bbox_embed are for region proposal generation
+ num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers
+ if config.with_box_refine:
+ self.class_embed = _get_clones(self.class_embed, num_pred)
+ self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
+ nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
+ # hack implementation for iterative bounding box refinement
+ self.model.decoder.bbox_embed = self.bbox_embed
+ else:
+ nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
+ self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
+ self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
+ self.model.decoder.bbox_embed = None
+ if config.two_stage:
+ # hack implementation for two-stage
+ self.model.decoder.class_embed = self.class_embed
+ for box_embed in self.bbox_embed:
+ nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py
+ @torch.jit.unused
+ def _set_aux_loss(self, outputs_class, outputs_coord):
+ # this is a workaround to make torchscript happy, as torchscript
+ # doesn't support dictionary with non-homogeneous values, such
+ # as a dict having both a Tensor and a list.
+ return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
+
+ @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=DeformableDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ pixel_mask: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.FloatTensor] = None,
+ encoder_outputs: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[List[dict]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], DeformableDetrObjectDetectionOutput]:
+ r"""
+ labels (`List[Dict]` of len `(batch_size,)`, *optional*):
+ Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
+ following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
+ respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
+ in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, DeformableDetrForObjectDetection
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
+ >>> model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
+ >>> target_sizes = torch.tensor([image.size[::-1]])
+ >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[
+ ... 0
+ ... ]
+ >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+ ... box = [round(i, 2) for i in box.tolist()]
+ ... print(
+ ... f"Detected {model.config.id2label[label.item()]} with confidence "
+ ... f"{round(score.item(), 3)} at location {box}"
+ ... )
+ Detected cat with confidence 0.8 at location [16.5, 52.84, 318.25, 470.78]
+ Detected cat with confidence 0.789 at location [342.19, 24.3, 640.02, 372.25]
+ Detected remote with confidence 0.633 at location [40.79, 72.78, 176.76, 117.25]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # First, send images through the DETR base model to obtain encoder + decoder outputs
+ outputs = self.model(
+ pixel_values,
+ pixel_mask=pixel_mask,
+ decoder_attention_mask=decoder_attention_mask,
+ encoder_outputs=encoder_outputs,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2]
+ init_reference = outputs.init_reference_points if return_dict else outputs[0]
+ inter_references = outputs.intermediate_reference_points if return_dict else outputs[3]
+
+ # class logits + predicted bounding boxes
+ outputs_classes = []
+ outputs_coords = []
+
+ for level in range(hidden_states.shape[1]):
+ if level == 0:
+ reference = init_reference
+ else:
+ reference = inter_references[:, level - 1]
+ reference = inverse_sigmoid(reference)
+ outputs_class = self.class_embed[level](hidden_states[:, level])
+ delta_bbox = self.bbox_embed[level](hidden_states[:, level])
+ if reference.shape[-1] == 4:
+ outputs_coord_logits = delta_bbox + reference
+ elif reference.shape[-1] == 2:
+ delta_bbox[..., :2] += reference
+ outputs_coord_logits = delta_bbox
+ else:
+ raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}")
+ outputs_coord = outputs_coord_logits.sigmoid()
+ outputs_classes.append(outputs_class)
+ outputs_coords.append(outputs_coord)
+ outputs_class = torch.stack(outputs_classes)
+ outputs_coord = torch.stack(outputs_coords)
+
+ logits = outputs_class[-1]
+ pred_boxes = outputs_coord[-1]
+
+ loss, loss_dict, auxiliary_outputs = None, None, None
+ if labels is not None:
+ # First: create the matcher
+ matcher = DeformableDetrHungarianMatcher(
+ class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost
+ )
+ # Second: create the criterion
+ losses = ["labels", "boxes", "cardinality"]
+ criterion = DeformableDetrLoss(
+ matcher=matcher,
+ num_classes=self.config.num_labels,
+ focal_alpha=self.config.focal_alpha,
+ losses=losses,
+ )
+ criterion.to(self.device)
+ # Third: compute the losses, based on outputs and labels
+ outputs_loss = {}
+ outputs_loss["logits"] = logits
+ outputs_loss["pred_boxes"] = pred_boxes
+ if self.config.auxiliary_loss:
+ auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
+ outputs_loss["auxiliary_outputs"] = auxiliary_outputs
+ if self.config.two_stage:
+ enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid()
+ outputs_loss["enc_outputs"] = {"logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord}
+
+ loss_dict = criterion(outputs_loss, labels)
+ # Fourth: compute total loss, as a weighted sum of the various losses
+ weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient}
+ weight_dict["loss_giou"] = self.config.giou_loss_coefficient
+ if self.config.auxiliary_loss:
+ aux_weight_dict = {}
+ for i in range(self.config.decoder_layers - 1):
+ aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
+ weight_dict.update(aux_weight_dict)
+ loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
+
+ if not return_dict:
+ if auxiliary_outputs is not None:
+ output = (logits, pred_boxes) + auxiliary_outputs + outputs
+ else:
+ output = (logits, pred_boxes) + outputs
+ tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output
+
+ return tuple_outputs
+
+ dict_outputs = DeformableDetrObjectDetectionOutput(
+ loss=loss,
+ loss_dict=loss_dict,
+ logits=logits,
+ pred_boxes=pred_boxes,
+ auxiliary_outputs=auxiliary_outputs,
+ last_hidden_state=outputs.last_hidden_state,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ intermediate_hidden_states=outputs.intermediate_hidden_states,
+ intermediate_reference_points=outputs.intermediate_reference_points,
+ init_reference_points=outputs.init_reference_points,
+ enc_outputs_class=outputs.enc_outputs_class,
+ enc_outputs_coord_logits=outputs.enc_outputs_coord_logits,
+ )
+
+ return dict_outputs
+
+
+# Copied from transformers.models.detr.modeling_detr.dice_loss
+def dice_loss(inputs, targets, num_boxes):
+ """
+ Compute the DICE loss, similar to generalized IOU for masks
+
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs (0 for the negative class and 1 for the positive
+ class).
+ """
+ inputs = inputs.sigmoid()
+ inputs = inputs.flatten(1)
+ numerator = 2 * (inputs * targets).sum(1)
+ denominator = inputs.sum(-1) + targets.sum(-1)
+ loss = 1 - (numerator + 1) / (denominator + 1)
+ return loss.sum() / num_boxes
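+
+ # A toy sanity check (hypothetical values): a prediction that confidently covers one of two target
+ # pixels gives numerator 2 and denominator 3, hence a dice loss of 1 - (2 + 1) / (3 + 1) = 0.25.
+ #
+ # >>> dice_loss(torch.tensor([[10.0, -10.0]]), torch.tensor([[1.0, 1.0]]), num_boxes=1)
+ # tensor(0.2500)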
+
+
+# Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss
+def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
+ """
+ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
+
+ Args:
+ inputs (`torch.FloatTensor` of arbitrary shape):
+ The predictions for each example.
+ targets (`torch.FloatTensor` with the same shape as `inputs`):
+ A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class
+ and 1 for the positive class).
+ alpha (`float`, *optional*, defaults to `0.25`):
+ Optional weighting factor in the range (0,1) to balance positive vs. negative examples.
+ gamma (`float`, *optional*, defaults to `2`):
+ Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples.
+
+ Returns:
+ Loss tensor
+ """
+ prob = inputs.sigmoid()
+ ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
+ # add modulating factor
+ p_t = prob * targets + (1 - prob) * (1 - targets)
+ loss = ce_loss * ((1 - p_t) ** gamma)
+
+ if alpha >= 0:
+ alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
+ loss = alpha_t * loss
+
+ return loss.mean(1).sum() / num_boxes
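+
+ # Intuition sketch (hypothetical numbers): for a well-classified positive with probability p = 0.9 and
+ # the defaults alpha = 0.25, gamma = 2, the modulating factor is (1 - p_t) ** gamma = 0.1 ** 2 = 0.01,
+ # so that example contributes only ~1% of its plain BCE term (further scaled by alpha), which is what
+ # lets the focal loss concentrate on hard examples.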
+
+
+class DeformableDetrLoss(nn.Module):
+ """
+ This class computes the losses for `DeformableDetrForObjectDetection`. The process happens in two steps: 1) we
+ compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of
+ matched ground-truth / prediction (supervise class and box).
+
+ Args:
+ matcher (`DeformableDetrHungarianMatcher`):
+ Module able to compute a matching between targets and proposals.
+ num_classes (`int`):
+ Number of object categories, omitting the special no-object category.
+ focal_alpha (`float`):
+ Alpha parameter in focal loss.
+ losses (`List[str]`):
+ List of all the losses to be applied. See `get_loss` for a list of all available losses.
+ """
+
+ def __init__(self, matcher, num_classes, focal_alpha, losses):
+ super().__init__()
+ self.matcher = matcher
+ self.num_classes = num_classes
+ self.focal_alpha = focal_alpha
+ self.losses = losses
+
+ # removed logging parameter, which was part of the original implementation
+ def loss_labels(self, outputs, targets, indices, num_boxes):
+ """
+ Classification loss (binary focal loss). Targets dicts must contain the key "class_labels" containing a tensor
+ of dim [nb_target_boxes].
+ """
+ if "logits" not in outputs:
+ raise KeyError("No logits were found in the outputs")
+ source_logits = outputs["logits"]
+
+ idx = self._get_source_permutation_idx(indices)
+ target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
+ target_classes = torch.full(
+ source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
+ )
+ target_classes[idx] = target_classes_o
+
+ target_classes_onehot = torch.zeros(
+ [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1],
+ dtype=source_logits.dtype,
+ layout=source_logits.layout,
+ device=source_logits.device,
+ )
+ target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
+
+ target_classes_onehot = target_classes_onehot[:, :, :-1]
+ loss_ce = (
+ sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2)
+ * source_logits.shape[1]
+ )
+ losses = {"loss_ce": loss_ce}
+
+ return losses
+
+ @torch.no_grad()
+ # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_cardinality
+ def loss_cardinality(self, outputs, targets, indices, num_boxes):
+ """
+ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.
+
+ This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients.
+ """
+ logits = outputs["logits"]
+ device = logits.device
+ target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
+ # Count the number of predictions that are NOT "no-object" (which is the last class)
+ card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
+ card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
+ losses = {"cardinality_error": card_err}
+ return losses
+
+ # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_boxes
+ def loss_boxes(self, outputs, targets, indices, num_boxes):
+ """
+ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
+
+ Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes
+ are expected in format (center_x, center_y, w, h), normalized by the image size.
+ """
+ if "pred_boxes" not in outputs:
+ raise KeyError("No predicted boxes found in outputs")
+ idx = self._get_source_permutation_idx(indices)
+ source_boxes = outputs["pred_boxes"][idx]
+ target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
+
+ loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none")
+
+ losses = {}
+ losses["loss_bbox"] = loss_bbox.sum() / num_boxes
+
+ loss_giou = 1 - torch.diag(
+ generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))
+ )
+ losses["loss_giou"] = loss_giou.sum() / num_boxes
+ return losses
+
+ # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_source_permutation_idx
+ def _get_source_permutation_idx(self, indices):
+ # permute predictions following indices
+ batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
+ source_idx = torch.cat([source for (source, _) in indices])
+ return batch_idx, source_idx
+
+ # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_target_permutation_idx
+ def _get_target_permutation_idx(self, indices):
+ # permute targets following indices
+ batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
+ target_idx = torch.cat([target for (_, target) in indices])
+ return batch_idx, target_idx
+
+ def get_loss(self, loss, outputs, targets, indices, num_boxes):
+ loss_map = {
+ "labels": self.loss_labels,
+ "cardinality": self.loss_cardinality,
+ "boxes": self.loss_boxes,
+ }
+ if loss not in loss_map:
+ raise ValueError(f"Loss {loss} not supported")
+ return loss_map[loss](outputs, targets, indices, num_boxes)
+
+ def forward(self, outputs, targets):
+ """
+ This performs the loss computation.
+
+ Args:
+ outputs (`dict`, *optional*):
+ Dictionary of tensors, see the output specification of the model for the format.
+ targets (`List[dict]`, *optional*):
+ List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the
+ losses applied, see each loss' doc.
+ """
+ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs" and k != "enc_outputs"}
+
+ # Retrieve the matching between the outputs of the last layer and the targets
+ indices = self.matcher(outputs_without_aux, targets)
+
+ # Compute the average number of target boxes across all nodes, for normalization purposes
+ num_boxes = sum(len(t["class_labels"]) for t in targets)
+ num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
+ # (Niels): comment out function below, distributed training to be added
+ # if is_dist_avail_and_initialized():
+ # torch.distributed.all_reduce(num_boxes)
+ # (Niels) in original implementation, num_boxes is divided by get_world_size()
+ num_boxes = torch.clamp(num_boxes, min=1).item()
+
+ # Compute all the requested losses
+ losses = {}
+ for loss in self.losses:
+ losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
+
+ # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
+ if "auxiliary_outputs" in outputs:
+ for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
+ indices = self.matcher(auxiliary_outputs, targets)
+ for loss in self.losses:
+ l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
+ l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
+ losses.update(l_dict)
+
+ if "enc_outputs" in outputs:
+ enc_outputs = outputs["enc_outputs"]
+ bin_targets = copy.deepcopy(targets)
+ for bt in bin_targets:
+ bt["class_labels"] = torch.zeros_like(bt["class_labels"])
+ indices = self.matcher(enc_outputs, bin_targets)
+ for loss in self.losses:
+ l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes)
+ l_dict = {k + "_enc": v for k, v in l_dict.items()}
+ losses.update(l_dict)
+
+ return losses
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead
+class DeformableDetrMLPPredictionHead(nn.Module):
+ """
+ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
+ height and width of a bounding box w.r.t. an image.
+
+ Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
+
+ """
+
+ def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+ return x
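+
+ # Layer layout sketch: with input_dim=256, hidden_dim=256, output_dim=4 and num_layers=3 (the values
+ # used for the bbox head in `DeformableDetrForObjectDetection`), this builds
+ # Linear(256, 256) -> ReLU -> Linear(256, 256) -> ReLU -> Linear(256, 4), i.e. ReLU after every layer
+ # except the last.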
+
+
+class DeformableDetrHungarianMatcher(nn.Module):
+ """
+ This class computes an assignment between the targets and the predictions of the network.
+
+ For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more
+ predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are
+ un-matched (and thus treated as non-objects).
+
+ Args:
+ class_cost:
+ The relative weight of the classification error in the matching cost.
+ bbox_cost:
+ The relative weight of the L1 error of the bounding box coordinates in the matching cost.
+ giou_cost:
+ The relative weight of the giou loss of the bounding box in the matching cost.
+ """
+
+ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1):
+ super().__init__()
+ requires_backends(self, ["scipy"])
+
+ self.class_cost = class_cost
+ self.bbox_cost = bbox_cost
+ self.giou_cost = giou_cost
+ if class_cost == 0 and bbox_cost == 0 and giou_cost == 0:
+ raise ValueError("All costs of the Matcher can't be 0")
+
+ @torch.no_grad()
+ def forward(self, outputs, targets):
+ """
+ Args:
+ outputs (`dict`):
+ A dictionary that contains at least these entries:
+ * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
+ * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.
+ targets (`List[dict]`):
+ A list of targets (len(targets) = batch_size), where each target is a dict containing:
+ * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
+ ground-truth
+ objects in the target) containing the class labels
+ * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.
+
+ Returns:
+ `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:
+ - index_i is the indices of the selected predictions (in order)
+ - index_j is the indices of the corresponding selected targets (in order)
+ For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
+ """
+ batch_size, num_queries = outputs["logits"].shape[:2]
+
+ # We flatten to compute the cost matrices in a batch
+ out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes]
+ out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
+
+ # Also concat the target labels and boxes
+ target_ids = torch.cat([v["class_labels"] for v in targets])
+ target_bbox = torch.cat([v["boxes"] for v in targets])
+
+ # Compute the classification cost.
+ alpha = 0.25
+ gamma = 2.0
+ neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())
+ pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
+ class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]
+
+ # Compute the L1 cost between boxes
+ bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
+
+ # Compute the giou cost between boxes
+ giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
+
+ # Final cost matrix
+ cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
+ cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
+
+ sizes = [len(v["boxes"]) for v in targets]
+ indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
+ return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
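+
+ # The final step is a standard Hungarian assignment per image. A toy illustration (hypothetical costs)
+ # of what `linear_sum_assignment` returns for a 3-query, 2-target cost matrix:
+ #
+ # >>> from scipy.optimize import linear_sum_assignment
+ # >>> linear_sum_assignment([[0.1, 0.9], [0.8, 0.2], [0.5, 0.5]])
+ # (array([0, 1]), array([0, 1]))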
+
+
+# Copied from transformers.models.detr.modeling_detr._upcast
+def _upcast(t: Tensor) -> Tensor:
+ # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
+ if t.is_floating_point():
+ return t if t.dtype in (torch.float32, torch.float64) else t.float()
+ else:
+ return t if t.dtype in (torch.int32, torch.int64) else t.int()
+
+
+# Copied from transformers.models.detr.modeling_detr.box_area
+def box_area(boxes: Tensor) -> Tensor:
+ """
+ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.
+
+ Args:
+ boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
+ Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
+ < x2` and `0 <= y1 < y2`.
+
+ Returns:
+ `torch.FloatTensor`: a tensor containing the area for each box.
+ """
+ boxes = _upcast(boxes)
+ return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
+
+
+# Copied from transformers.models.detr.modeling_detr.box_iou
+def box_iou(boxes1, boxes2):
+ area1 = box_area(boxes1)
+ area2 = box_area(boxes2)
+
+ left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
+ right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
+
+ width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2]
+ inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M]
+
+ union = area1[:, None] + area2 - inter
+
+ iou = inter / union
+ return iou, union
+
+
+# Copied from transformers.models.detr.modeling_detr.generalized_box_iou
+def generalized_box_iou(boxes1, boxes2):
+ """
+ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.
+
+ Returns:
+ `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
+ """
+ # degenerate boxes gives inf / nan results
+ # so do an early check
+ if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
+ raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
+ if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
+ raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
+ iou, union = box_iou(boxes1, boxes2)
+
+ top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
+ bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
+
+ width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2]
+ area = width_height[:, :, 0] * width_height[:, :, 1]
+
+ return iou - (area - union) / area
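+
+ # A quick numeric check (hypothetical boxes): two disjoint unit boxes have IoU 0 and their smallest
+ # enclosing box has area 3, so GIoU = 0 - (3 - 2) / 3 = -1/3.
+ #
+ # >>> generalized_box_iou(torch.tensor([[0.0, 0.0, 1.0, 1.0]]), torch.tensor([[2.0, 0.0, 3.0, 1.0]]))
+ # tensor([[-0.3333]])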
+
+
+# Copied from transformers.models.detr.modeling_detr._max_by_axis
+def _max_by_axis(the_list):
+ # type: (List[List[int]]) -> List[int]
+ maxes = the_list[0]
+ for sublist in the_list[1:]:
+ for index, item in enumerate(sublist):
+ maxes[index] = max(maxes[index], item)
+ return maxes
+
+
+# Copied from transformers.models.detr.modeling_detr.NestedTensor
+class NestedTensor(object):
+ def __init__(self, tensors, mask: Optional[Tensor]):
+ self.tensors = tensors
+ self.mask = mask
+
+ def to(self, device):
+ cast_tensor = self.tensors.to(device)
+ mask = self.mask
+ if mask is not None:
+ cast_mask = mask.to(device)
+ else:
+ cast_mask = None
+ return NestedTensor(cast_tensor, cast_mask)
+
+ def decompose(self):
+ return self.tensors, self.mask
+
+ def __repr__(self):
+ return str(self.tensors)
+
+
+# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list
+def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
+ if tensor_list[0].ndim == 3:
+ max_size = _max_by_axis([list(img.shape) for img in tensor_list])
+ batch_shape = [len(tensor_list)] + max_size
+ batch_size, num_channels, height, width = batch_shape
+ dtype = tensor_list[0].dtype
+ device = tensor_list[0].device
+ tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
+ mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
+ for img, pad_img, m in zip(tensor_list, tensor, mask):
+ pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
+ m[: img.shape[1], : img.shape[2]] = False
+ else:
+ raise ValueError("Only 3-dimensional tensors are supported")
+ return NestedTensor(tensor, mask)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f518b03f9c8264767eb77e073c739af0dd6c88b2
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/configuration_mobilenet_v2.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/configuration_mobilenet_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e19084948214dcfdd8df2bf7a25c4f980d0ea572
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/configuration_mobilenet_v2.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c581ccfaecfa229dbee69c9109b55d6b606261a
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py b/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..62581e2c09988b84233c224897dd99a9da952008
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for MobileNetV2."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class MobileNetV2FeatureExtractor(MobileNetV2ImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class MobileNetV2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use MobileNetV2ImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/rembert/__pycache__/modeling_rembert.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/rembert/__pycache__/modeling_rembert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef766e26cba4a737e6b609da28aaa68173856d6b
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/rembert/__pycache__/modeling_rembert.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xglm/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/xglm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..747a4ddb4ed9c77048748341446b2eec8227570a
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xglm/__init__.py
@@ -0,0 +1,138 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_sentencepiece_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_xglm"] = [
+ "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "XGLMForCausalLM",
+ "XGLMModel",
+ "XGLMPreTrainedModel",
+ ]
+
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_xglm"] = [
+ "FlaxXGLMForCausalLM",
+ "FlaxXGLMModel",
+ "FlaxXGLMPreTrainedModel",
+ ]
+
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_xglm"] = [
+ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFXGLMForCausalLM",
+ "TFXGLMModel",
+ "TFXGLMPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_xglm import XGLMTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_xglm_fast import XGLMTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_xglm import (
+ TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFXGLMForCausalLM,
+ TFXGLMModel,
+ TFXGLMPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
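+
+
+# Usage note, not part of the upstream file: because of the `_LazyModule` indirection above, importing a single
+# symbol only loads the submodule that defines it, so the heavy torch/flax/tf modeling modules are imported on
+# first access and skipped entirely when their optional dependency is missing. For example (hypothetical session):
+#   from transformers.models.xglm import XGLMConfig   # loads configuration_xglm only
+#   config = XGLMConfig()                              # works without torch installed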
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xglm/configuration_xglm.py b/llava_next/lib/python3.10/site-packages/transformers/models/xglm/configuration_xglm.py
new file mode 100644
index 0000000000000000000000000000000000000000..9377bbce6f01ec9a9bcf7aca30e971a4f508aa96
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xglm/configuration_xglm.py
@@ -0,0 +1,141 @@
+# coding=utf-8
+# Copyright The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XGLM model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
+ # See all XGLM models at https://huggingface.co/models?filter=xglm
+}
+
+
+class XGLMConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`XGLMModel`]. It is used to instantiate an XGLM
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the XGLM
+ [facebook/xglm-564M](https://huggingface.co/facebook/xglm-564M) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 256008):
+ Vocabulary size of the XGLM model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`XGLMModel`] or [`FlaxXGLMModel`].
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimension of the layers and the pooler layer.
+ ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
+ num_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer decoder.
+ attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, decoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ scale_embedding (`bool`, *optional*, defaults to `True`):
+ Scale embeddings by dividing by sqrt(d_model).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+
+ Example:
+
+ ```python
+ >>> from transformers import XGLMModel, XGLMConfig
+
+ >>> # Initializing a XGLM facebook/xglm-564M style configuration
+ >>> configuration = XGLMConfig()
+
+ >>> # Initializing a model from the facebook/xglm-564M style configuration
+ >>> model = XGLMModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "xglm"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ attribute_map = {
+ "num_attention_heads": "attention_heads",
+ "hidden_size": "d_model",
+ "num_hidden_layers": "num_layers",
+ }
+
+ def __init__(
+ self,
+ vocab_size=256008,
+ max_position_embeddings=2048,
+ d_model=1024,
+ ffn_dim=4096,
+ num_layers=24,
+ attention_heads=16,
+ activation_function="gelu",
+ dropout=0.1,
+ attention_dropout=0.1,
+ activation_dropout=0.0,
+ layerdrop=0.0,
+ init_std=0.02,
+ scale_embedding=True,
+ use_cache=True,
+ decoder_start_token_id=2,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.ffn_dim = ffn_dim
+ self.num_layers = num_layers
+ self.attention_heads = attention_heads
+ self.activation_function = activation_function
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.layerdrop = layerdrop
+ self.init_std = init_std
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ self.use_cache = use_cache
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py b/llava_next/lib/python3.10/site-packages/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8b5dba3c1e47bb9cee6c23c4281746c4dde4761
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xglm/convert_xglm_original_ckpt_to_trfms.py
@@ -0,0 +1,68 @@
+import argparse
+from argparse import Namespace
+
+import torch
+from torch import nn
+
+from transformers import XGLMConfig, XGLMForCausalLM
+
+
+def remove_ignore_keys_(state_dict):
+ ignore_keys = [
+ "decoder.version",
+ "decoder.output_projection.weight",
+ "_float_tensor",
+ "decoder.embed_positions._float_tensor",
+ ]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def make_linear_from_emb(emb):
+ vocab_size, emb_size = emb.weight.shape
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
+ lin_layer.weight.data = emb.weight.data
+ return lin_layer
+
+
+def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
+ checkpoint = torch.load(checkpoint_path, map_location="cpu")
+ args = Namespace(**checkpoint["cfg"]["model"])
+ state_dict = checkpoint["model"]
+ remove_ignore_keys_(state_dict)
+ vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
+
+ state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
+
+ config = XGLMConfig(
+ vocab_size=vocab_size,
+ max_position_embeddings=args.max_target_positions,
+ num_layers=args.decoder_layers,
+ attention_heads=args.decoder_attention_heads,
+ ffn_dim=args.decoder_ffn_embed_dim,
+ d_model=args.decoder_embed_dim,
+ layerdrop=args.decoder_layerdrop,
+ dropout=args.dropout,
+ attention_dropout=args.attention_dropout,
+ activation_dropout=args.activation_dropout,
+ activation_function="gelu",
+ scale_embedding=not args.no_scale_embedding,
+ tie_word_embeddings=args.share_decoder_input_output_embed,
+ )
+
+ model = XGLMForCausalLM(config)
+ missing = model.load_state_dict(state_dict, strict=False)
+ print(missing)
+ model.lm_head = make_linear_from_emb(model.model.embed_tokens)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
+ parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ args = parser.parse_args()
+ model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
+ model.save_pretrained(args.pytorch_dump_folder_path)
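+
+
+# Example invocation, not part of the upstream script (paths are hypothetical):
+#   python convert_xglm_original_ckpt_to_trfms.py /path/to/fairseq/model.pt ./xglm-converted
+# The first positional argument is the fairseq checkpoint file, the second is the output folder passed to
+# `save_pretrained`.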
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2aaf1ebf3ba6526f3b522f0c7cf91f559b351cb7
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py
new file mode 100644
index 0000000000000000000000000000000000000000..884068f1a1571037a1502c94c6dbb6ea5bdc22ae
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py
@@ -0,0 +1,348 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes."""
+
+import os
+import unicodedata
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import SPIECE_UNDERLINE, logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+
+class CpmTokenizer(PreTrainedTokenizer):
+ """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=False,
+ remove_space=True,
+ keep_accents=False,
+ bos_token="",
+ eos_token="",
+ unk_token="",
+ sep_token="",
+ pad_token="",
+ cls_token="",
+ mask_token="",
+ additional_special_tokens=["", ""],
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ """
+ Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether to lowercase the input when tokenizing.
+ remove_space (`bool`, *optional*, defaults to `True`):
+ Whether to strip the text when tokenizing (removing excess spaces before and after the string).
+ keep_accents (`bool`, *optional*, defaults to `False`):
+ Whether to keep accents when tokenizing.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+ token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of
+ sequence. The token used is the `sep_token`.
+
+ </Tip>
+
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
+ this token instead.
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
+ for sequence classification or for a text and a question for question answering. It is also used as the
+ last token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole
+ sequence instead of per-token classification). It is the first token of the sequence when built with
+ special tokens.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
+ Additional special tokens used by the tokenizer.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+ self.vocab_file = vocab_file
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ try:
+ import jieba
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
+ "See https://pypi.org/project/jieba/ for installation."
+ )
+ self.jieba = jieba
+ self.translator = str.maketrans(" \n", "\u2582\u2583")
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ remove_space=remove_space,
+ keep_accents=keep_accents,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ additional_special_tokens=additional_special_tokens,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ self._pad_token_type_id = 3
+
+ @property
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.sp_model)
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_vocab
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__getstate__
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__setstate__
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.preprocess_text
+ def preprocess_text(self, inputs):
+ if self.remove_space:
+ outputs = " ".join(inputs.strip().split())
+ else:
+ outputs = inputs
+ outputs = outputs.replace("``", '"').replace("''", '"')
+
+ if not self.keep_accents:
+ outputs = unicodedata.normalize("NFKD", outputs)
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
+ if self.do_lower_case:
+ outputs = outputs.lower()
+
+ return outputs
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._tokenize
+ def _tokenize(self, text: str) -> List[str]:
+ """Tokenize a string."""
+ text = self.preprocess_text(text)
+ pieces = self.sp_model.encode(text, out_type=str)
+ new_pieces = []
+ for piece in pieces:
+ if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
+ if len(cur_pieces[0]) == 1:
+ cur_pieces = cur_pieces[1:]
+ else:
+ cur_pieces[0] = cur_pieces[0][1:]
+ cur_pieces.append(piece[-1])
+ new_pieces.extend(cur_pieces)
+ else:
+ new_pieces.append(piece)
+
+ return new_pieces
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.PieceToId(token)
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.sp_model.IdToPiece(index)
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An XLNet sequence has the following format:
+
+ - single sequence: `X <sep> <cls>`
+ - pair of sequences: `A <sep> B <sep> <cls>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return token_ids_0 + sep + cls
+ return token_ids_0 + sep + token_ids_1 + sep + cls
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
+ return ([0] * len(token_ids_0)) + [1, 1]
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls_segment_id = [2]
+
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0] + cls_segment_id
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def _decode(self, *args, **kwargs):
+ text = super()._decode(*args, **kwargs)
+ text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
+ return text
+
+
+__all__ = ["CpmTokenizer"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef933e084ddb2b375df12cc27aa03a27de55db43
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py
@@ -0,0 +1,241 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes."""
+
+import os
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils_fast import AddedToken, PreTrainedTokenizerFast
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
+
+
+class CpmTokenizerFast(PreTrainedTokenizerFast):
+ """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=False,
+ remove_space=True,
+ keep_accents=False,
+ bos_token="",
+ eos_token="",
+ unk_token="",
+ sep_token="",
+ pad_token="",
+ cls_token="",
+ mask_token="",
+ additional_special_tokens=["", ""],
+ **kwargs,
+ ):
+ """
+ Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether to lowercase the input when tokenizing.
+ remove_space (`bool`, *optional*, defaults to `True`):
+ Whether to strip the text when tokenizing (removing excess spaces before and after the string).
+ keep_accents (`bool`, *optional*, defaults to `False`):
+ Whether to keep accents when tokenizing.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+ token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of
+ sequence. The token used is the `sep_token`.
+
+ </Tip>
+
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
+ this token instead.
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
+ for sequence classification or for a text and a question for question answering. It is also used as the
+ last token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole
+ sequence instead of per-token classification). It is the first token of the sequence when built with
+ special tokens.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
+ Additional special tokens used by the tokenizer.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file=vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ remove_space=remove_space,
+ keep_accents=keep_accents,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ additional_special_tokens=additional_special_tokens,
+ **kwargs,
+ )
+
+ self._pad_token_type_id = 3
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+ self.vocab_file = vocab_file
+
+ try:
+ import jieba
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
+ "See https://pypi.org/project/jieba/ for installation."
+ )
+ self.jieba = jieba
+ self.translator = str.maketrans(" \n", "\u2582\u2583")
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An XLNet sequence has the following format:
+
+ - single sequence: `X <sep> <cls>`
+ - pair of sequences: `A <sep> B <sep> <cls>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return token_ids_0 + sep + cls
+ return token_ids_0 + sep + token_ids_1 + sep + cls
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls_segment_id = [2]
+
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0] + cls_segment_id
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
+
+ # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
+
+ def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
+ batch_text_or_text_pairs = [
+ " ".join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)])
+ for text in batch_text_or_text_pairs
+ ]
+ return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)
+
+ def _decode(self, *args, **kwargs):
+ text = super()._decode(*args, **kwargs)
+ text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
+ return text
+
+
+__all__ = ["CpmTokenizerFast"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2716e62cd7b28ac6121300395a470d855e144a42
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_longt5 import *
+ from .modeling_flax_longt5 import *
+ from .modeling_longt5 import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef991e970740c67d62bdd44cabc9adffdc48be6f
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..136b341f35891e92dc963d0935d848f4e93c2d2b
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8169870cd36898a56fc357fb7c70dc9f01f91606
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..931264aef70a8692aee49fb03a638f0f3a7a42eb
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6799145d436eb2bfccfe542f42388b39359c5c9c
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..9acac4e447d81d598f4b3163078fbde65bdb4b42
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+# Copyright 2022, The LongT5 Authors and HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""LongT5 model configuration"""
+
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxSeq2SeqConfigWithPast
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class LongT5Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is
+ used to instantiate a LongT5 model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5
+ [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Arguments:
+ vocab_size (`int`, *optional*, defaults to 32128):
+ Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`LongT5Model`].
+ d_model (`int`, *optional*, defaults to 512):
+ Size of the encoder layers and the pooler layer.
+ d_kv (`int`, *optional*, defaults to 64):
+ Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
+ num_heads`.
+ d_ff (`int`, *optional*, defaults to 2048):
+ Size of the intermediate feed forward layer in each `LongT5Block`.
+ num_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer encoder.
+ num_decoder_layers (`int`, *optional*):
+ Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
+ num_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ local_radius (`int`, *optional*, defaults to 127):
+ Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism.
+ global_block_size (`int`, *optional*, defaults to 16):
+ Length of blocks an input sequence is divided into for a global token representation. Used only for
+ `encoder_attention_type = "transient-global"`.
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
+ The number of buckets to use for each attention layer.
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
+ The maximum distance of the longer sequences for the bucket separation.
+ dropout_rate (`float`, *optional*, defaults to 0.1):
+ The ratio for all dropout layers.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
+ The epsilon used by the layer normalization layers.
+ initializer_factor (`float`, *optional*, defaults to 1):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+ feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
+ Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the
+ `"gated-gelu"` feed forward projection. Original LongT5 implementation uses `"gated-gelu"`.
+ encoder_attention_type (`string`, *optional*, defaults to `"local"`):
+ Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are
+ supported by LongT5 implementation.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ """
+
+ model_type = "longt5"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "num_heads",
+ "num_hidden_layers": "num_layers",
+ "head_dim": "d_kv",
+ }
+
+ def __init__(
+ self,
+ vocab_size=32128,
+ d_model=512,
+ d_kv=64,
+ d_ff=2048,
+ num_layers=6,
+ num_decoder_layers=None,
+ num_heads=8,
+ local_radius=127,
+ global_block_size=16,
+ relative_attention_num_buckets=32,
+ relative_attention_max_distance=128,
+ dropout_rate=0.1,
+ layer_norm_epsilon=1e-6,
+ initializer_factor=1.0,
+ feed_forward_proj="relu",
+ is_encoder_decoder=True,
+ encoder_attention_type="local",
+ use_cache=True,
+ pad_token_id=0,
+ eos_token_id=1,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.d_model = d_model
+ self.d_kv = d_kv
+ self.d_ff = d_ff
+ self.num_layers = num_layers
+ # default = symmetry
+ self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
+ self.num_heads = num_heads
+ self.local_radius = local_radius
+ self.global_block_size = global_block_size
+ self.relative_attention_num_buckets = relative_attention_num_buckets
+ self.relative_attention_max_distance = relative_attention_max_distance
+ self.dropout_rate = dropout_rate
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_factor = initializer_factor
+ self.feed_forward_proj = feed_forward_proj
+ self.encoder_attention_type = encoder_attention_type
+ self.use_cache = use_cache
+
+ act_info = self.feed_forward_proj.split("-")
+ self.dense_act_fn = act_info[-1]
+ self.is_gated_act = act_info[0] == "gated"
+
+ if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
+ raise ValueError(
+ f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
+ "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
+ "'gated-gelu' or 'relu'"
+ )
+
+ # for backwards compatibility
+ if feed_forward_proj == "gated-gelu":
+ self.dense_act_fn = "gelu_new"
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ **kwargs,
+ )
+
+
+class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ common_inputs = {
+ "input_ids": {0: "batch", 1: "encoder_sequence"},
+ "attention_mask": {0: "batch", 1: "encoder_sequence"},
+ }
+ if self.use_past:
+ common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
+ common_inputs["decoder_input_ids"] = {0: "batch"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
+ else:
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
+
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+
+ return common_inputs
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 13
+
+
+__all__ = ["LongT5Config", "LongT5OnnxConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf5c2d52d8ea084687dae41758e79fadd453bc5f
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py
@@ -0,0 +1,215 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Convert T5/LongT5X checkpoints from the original repository to JAX/FLAX model. This script is an extension of
+'src/transformers/models/t5/convert_t5x_checkpoint_to_flax.py'.
+"""
+
+import argparse
+
+from t5x import checkpoints
+
+from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
+
+
+def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
+ config = AutoConfig.from_pretrained(config_name)
+ flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
+ t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
+
+ split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]
+
+ if config.model_type == "t5":
+ encoder_attn_name = "SelfAttention"
+ elif config.model_type == "longt5" and config.encoder_attention_type == "local":
+ encoder_attn_name = "LocalSelfAttention"
+ elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ encoder_attn_name = "TransientGlobalSelfAttention"
+ else:
+ raise ValueError(
+ "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
+ " attribute with a value from ['local', 'transient-global']."
+ )
+
+ # Encoder
+ for layer_index in range(config.num_layers):
+ layer_name = f"layers_{str(layer_index)}"
+
+ # Self-Attention
+ t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
+ t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
+ t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
+ t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
+
+ # Global input layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
+
+ # Layer Normalization
+ t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
+
+ if split_mlp_wi:
+ t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
+ t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
+ else:
+ t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
+
+ t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
+
+ # Layer Normalization
+ t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
+
+ # Assigning
+ flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value
+
+ flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm
+
+ # Global input layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
+ t5x_global_layer_norm
+ )
+
+ if split_mlp_wi:
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
+ else:
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
+
+ flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
+ flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm
+
+ flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block
+
+ # Only for layer 0:
+ t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
+ flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
+ "embedding"
+ ] = t5x_encoder_rel_embedding
+
+ # Side/global relative position_bias + layer norm
+ if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+ t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
+ flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
+ "embedding"
+ ] = t5x_encoder_global_rel_embedding
+
+ # Assigning
+ t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
+ flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
+
+ # Decoder
+ for layer_index in range(config.num_layers):
+ layer_name = f"layers_{str(layer_index)}"
+
+ # Self-Attention
+ t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
+ t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
+ t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
+ t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
+
+ # Layer Normalization
+ t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
+ "scale"
+ ]
+
+ # Encoder-Decoder-Attention
+ t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
+ t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
+ t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
+ t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
+ t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]
+
+ # Layer Normalization
+ t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
+
+ # MLP
+ if split_mlp_wi:
+ t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
+ t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
+ else:
+ t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
+
+ t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
+
+ # Layer Normalization
+ tx5_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
+
+ # Assigning
+ flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
+ flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
+
+ flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm
+
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
+ flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
+
+ flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm
+
+ if split_mlp_wi:
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
+ else:
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
+
+ flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
+
+ flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tx5_mlp_layer_norm
+
+ flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block
+
+ # Decoder Normalization
+    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
+    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm
+
+ # Only for layer 0:
+ t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
+ flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
+ "embedding"
+ ] = t5x_decoder_rel_embedding
+
+ # Token Embeddings
+    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
+    flax_model.params["shared"]["embedding"] = t5x_token_embeddings
+
+ # LM Head (only in v1.1 and LongT5 checkpoints)
+ if "logits_dense" in t5x_model["target"]["decoder"]:
+ flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]
+
+ flax_model.save_pretrained(flax_dump_folder_path)
+ print("T5X Model was sucessfully converted!")
+
+
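+# Example invocation (editorial illustration; the script filename and local paths below are placeholders):
+#   python convert_longt5x_checkpoint_to_flax.py \
+#     --t5x_checkpoint_path /path/to/t5x_checkpoint/ \
+#     --config_name google/long-t5-local-base \
+#     --flax_dump_folder_path /path/to/flax_dump/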
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
+ )
+ parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
+ parser.add_argument(
+ "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
+ )
+ args = parser.parse_args()
+ convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..55081978dbf65d03c1264fc34dee948cf5a109fc
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py
@@ -0,0 +1,2449 @@
+# coding=utf-8
+# Copyright 2022 LongT5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Flax LongT5 model."""
+
+import copy
+from typing import Any, Callable, List, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen import partitioning as nn_partitioning
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
+ FlaxCausalLMOutputWithCrossAttentions,
+ FlaxSeq2SeqLMOutput,
+ FlaxSeq2SeqModelOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_call_sample_docstring,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_longt5 import LongT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/long-t5-local-base"
+_CONFIG_FOR_DOC = "LongT5Config"
+
+remat = nn_partitioning.remat
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
+def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = jnp.zeros_like(input_ids)
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
+
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
+ return shifted_input_ids
+
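+# Illustrative example (editorial, not part of the original module): with pad_token_id=0 and
+# decoder_start_token_id=0, input_ids [[5, 6, 7]] is shifted to [[0, 5, 6]]; any -100 label
+# positions are replaced by the pad token after the shift.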
+
+def _pad_to_multiple(x: jnp.ndarray, block_len: int, axis: int, pad_value: int = 0) -> jnp.ndarray:
+ """Pad an array so that a sequence length will be a multiple of `block_len`"""
+ pad_len = -x.shape[axis] % block_len
+ pad = [(0, 0)] * x.ndim
+ pad[axis] = (0, pad_len)
+ x = jnp.pad(x, pad_width=pad, mode="constant", constant_values=pad_value)
+ return x
+
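+# Illustrative example (editorial): for x of shape (2, 10, 64) with block_len=4 and axis=1,
+# pad_len = -10 % 4 = 2, so the padded array has shape (2, 12, 64).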
+
+def _split_into_blocks(x: jnp.ndarray, block_len: int, axis: int) -> jnp.ndarray:
+ """Split an input array into blocks of a given `block_len` along the given `axis`. If the dimension length
+ is not a multiple of `block_len`, it will be padded first with selected `pad_value`.
+ """
+ # pad tensor to multiple of block_len
+ if x.shape[axis] % block_len != 0:
+ x = _pad_to_multiple(x, block_len, axis, pad_value=0)
+ num_blocks = x.shape[axis] // block_len
+ output_shape = x.shape[:axis] + (num_blocks, block_len) + x.shape[(axis + 1) :]
+ return x.reshape(output_shape)
+
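+# Illustrative example (editorial): a (2, 10, 64) array split with block_len=4 along axis=1 is
+# first padded to (2, 12, 64) and then reshaped to (2, 3, 4, 64), i.e. (batch, num_blocks, block_len, dim).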
+
+def _concatenate_3_blocks(x: jnp.ndarray, block_axis: int, sequence_axis: int, pad_value: int = 0) -> jnp.ndarray:
+ """Concatenate three consecutive blocks for each input block for local attentiont.
+ For more information, see: https://arxiv.org/pdf/2112.07916.pdf.
+ """
+ num_blocks = x.shape[block_axis]
+
+ pad = [(0, 0)] * x.ndim
+ pad[block_axis] = (1, 1)
+ # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]
+ x = jnp.pad(x, pad_width=pad, mode="constant", constant_values=pad_value)
+
+    blocks_list: List[jnp.ndarray] = []
+ for i in range(3):
+ # We use indexing approach here:
+ # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs
+ indices = [slice(0, None)] * x.ndim
+ indices[block_axis] = slice(i, i + num_blocks)
+ indices = tuple(indices)
+ blocks_list.append(x[indices])
+ return jnp.concatenate(blocks_list, axis=sequence_axis) # [batch_size, num_blocks, 3 * block_len, ...]
+
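+# Editorial note: each block is concatenated with its left and right neighbours, so a
+# (batch, num_blocks, block_len, ...) input becomes (batch, num_blocks, 3 * block_len, ...);
+# the first and last blocks see a `pad_value` block on their missing side.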
+
+def _make_3block_relative_position_ids(block_len: int) -> jnp.ndarray:
+ """Makes 3-blocked relative position ids for local attention."""
+ position_ids = jnp.arange(3 * block_len, dtype=jnp.int32)
+ center_position_ids = position_ids[block_len:-block_len]
+ relative_position_ids = position_ids[None, :] - center_position_ids[:, None] # [block_len, 3 * block_len]
+ return relative_position_ids
+
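+# Illustrative example (editorial): for block_len=2 the center positions are [2, 3] within the
+# 3-block window [0, ..., 5], giving relative ids [[-2, -1, 0, 1, 2, 3], [-3, -2, -1, 0, 1, 2]]
+# of shape (block_len, 3 * block_len).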
+
+def _mask_local_attention_mask(local_attention_mask: np.ndarray, block_len: int) -> jnp.ndarray:
+ """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius."""
+ relative_position_ids = _make_3block_relative_position_ids(block_len)
+ locality_mask = jnp.abs(relative_position_ids) < block_len
+ locality_mask = locality_mask[None, None, :, :]
+ return jnp.logical_and(local_attention_mask, locality_mask)
+
+
+def _get_local_attention_mask(attention_mask: np.ndarray, block_len: int) -> jnp.ndarray:
+ """Prepare attention mask to be applied for a local attention."""
+ # [batch_size, num_blocks, block_len]
+ _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, axis=1)
+ # [batch_size, num_block, 3 * block_len]
+ _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_axis=1, sequence_axis=2)
+
+ _blocked_attention_mask = _blocked_attention_mask[..., None]
+ _3blocked_attention_mask = _3blocked_attention_mask[..., None, :]
+ # [batch_size, num_block, block_len, 3 * block_len]
+ local_attention_mask = jnp.logical_and(_blocked_attention_mask, _3blocked_attention_mask)
+ local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)
+ # [batch_size, 1, num_block, block_len, 3 * block_len]
+ return local_attention_mask[:, None, ...]
+
+
+def _make_global_fixed_block_ids(attention_mask: np.ndarray, global_block_size: int) -> Tuple[jnp.ndarray, np.ndarray]:
+ """Obtain the "fixed block" global id corresponding to each input token.
+
+    This implementation is a simplified version of the original Flaxformer implementation adapted from:
+ https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.
+
+    In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens that do not make up
+    a whole fixed block, are assigned to the preceding block.
+
+ Padding tokens from the original sequence are represented by -1.
+ """
+ batch_size, seq_len = attention_mask.shape[:2]
+
+ def handle_orphan_tokens(block_ids: np.ndarray) -> jnp.ndarray:
+ block_ends = (jnp.arange(seq_len) % global_block_size) == global_block_size - 1
+ true_block_ends = jnp.logical_and(block_ends, block_ids >= 0)
+ full_blocks = true_block_ends.sum(-1)[..., None]
+ block_ids = jnp.minimum(block_ids, full_blocks - 1)
+ return block_ids
+
+ fixed_block_mask = jnp.ones_like(attention_mask) / global_block_size
+ fixed_block_mask = jnp.cumsum(fixed_block_mask, axis=1) - fixed_block_mask
+ mask = jnp.where(attention_mask != 0.0, 1.0, -1000.0)
+ global_block_ids = jnp.maximum(
+ jnp.floor(mask + fixed_block_mask - 1.0), jnp.array(-1.0, dtype=attention_mask.dtype)
+ )
+ # set padding tokens to -1
+ global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)
+ # [batch_size, seq_len]
+ global_block_ids = handle_orphan_tokens(global_block_ids)
+ num_globals = seq_len // global_block_size
+
+ # [batch_size, seq_len // global_block_size]
+ if num_globals > 0:
+ _sequence_block_ids_max = jnp.repeat(global_block_ids.max(axis=-1)[:, None], repeats=num_globals, axis=1)
+ else:
+ _sequence_block_ids_max = jnp.zeros((batch_size, 0), dtype=global_block_ids.dtype)
+ global_segment_ids = jnp.cumsum(jnp.ones((batch_size, num_globals)), axis=-1) - 1
+ global_segment_ids = jnp.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)
+ return global_block_ids, global_segment_ids
+
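+# Illustrative example (editorial): for an all-ones attention mask of shape (1, 8) and
+# global_block_size=4, global_block_ids is [[0, 0, 0, 0, 1, 1, 1, 1]] and global_segment_ids
+# is [[1, 1]]; masked (padding) positions would instead receive block id -1.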
+
+def _make_side_relative_position_ids(attention_mask: np.ndarray, global_block_size: int) -> np.ndarray:
+ """Create the relative position tensor for local -> global attention."""
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)
+ global_seq_len = global_segment_ids.shape[-1]
+ global_positions = jnp.arange(global_seq_len)
+ side_relative_position = global_positions - block_ids[..., None]
+ return side_relative_position
+
+
+def _create_global_aggregates(hidden_states: np.ndarray, block_ids: np.ndarray, global_seq_len: int) -> np.ndarray:
+ """Compute individual block aggregates by summing over individual blocks."""
+    # (batch..., seq_len, global_seq_len)
+ one_hot_block_ids = jax.nn.one_hot(block_ids, global_seq_len)
+ return jnp.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids)
+
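+# Illustrative example (editorial): with block_ids [[0, 0, 1, 1]] and global_seq_len=2, each of
+# the two global rows is the sum of the hidden states of the tokens sharing that block id;
+# padding tokens (block id -1) contribute nothing because jax.nn.one_hot maps out-of-range ids
+# to all-zero rows.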
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerNorm with T5->LongT5
+class FlaxLongT5LayerNorm(nn.Module):
+ hidden_size: int
+ dtype: jnp.dtype = jnp.float32
+ eps: float = 1e-6
+ weight_init: Callable[..., np.ndarray] = jax.nn.initializers.ones
+
+ def setup(self):
+ self.weight = self.param("weight", self.weight_init, (self.hidden_size,))
+
+ def __call__(self, hidden_states):
+ """
+ Construct a layernorm module in the LongT5 style; No bias and no subtraction of mean.
+ """
+ # layer norm should always be calculated in float32
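+        # (editorial note) this is an RMSNorm-style normalization: activations are divided by
+        # sqrt(mean(x**2) + eps) and then scaled elementwise by the learned `weight`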
+ variance = jnp.power(hidden_states.astype("f4"), 2).mean(axis=-1, keepdims=True)
+ hidden_states = hidden_states / jnp.sqrt(variance + self.eps)
+
+ return self.weight * hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseActDense with T5->LongT5
+class FlaxLongT5DenseActDense(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5)
+ wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5)
+
+ self.wi = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wo = nn.Dense(
+ self.config.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wo_init_std),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+ self.act = ACT2FN[self.config.dense_act_fn]
+
+ def __call__(self, hidden_states, deterministic=True):
+ hidden_states = self.wi(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5DenseGatedActDense with T5->LongT5
+class FlaxLongT5DenseGatedActDense(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5)
+ wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5)
+
+ self.wi_0 = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wi_1 = nn.Dense(
+ self.config.d_ff,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wi_init_std),
+ dtype=self.dtype,
+ )
+ self.wo = nn.Dense(
+ self.config.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(wo_init_std),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+ self.act = ACT2FN[self.config.dense_act_fn]
+
+ def __call__(self, hidden_states, deterministic):
+ hidden_gelu = self.act(self.wi_0(hidden_states))
+ hidden_linear = self.wi_1(hidden_states)
+ hidden_states = hidden_gelu * hidden_linear
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerFF with T5->LongT5
+class FlaxLongT5LayerFF(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ if self.config.is_gated_act:
+ self.DenseReluDense = FlaxLongT5DenseGatedActDense(self.config, dtype=self.dtype)
+ else:
+ self.DenseReluDense = FlaxLongT5DenseActDense(self.config, dtype=self.dtype)
+
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(self, hidden_states, deterministic=True):
+ forwarded_states = self.layer_norm(hidden_states)
+ forwarded_states = self.DenseReluDense(forwarded_states, deterministic=deterministic)
+ hidden_states = hidden_states + self.dropout(forwarded_states, deterministic=deterministic)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention with T5->LongT5
+class FlaxLongT5Attention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ causal: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+
+ @staticmethod
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
+
+ def compute_bias(self, query_length, key_length):
+ """Compute binned relative position bias"""
+ context_position = jnp.arange(query_length, dtype="i4")[:, None]
+ memory_position = jnp.arange(key_length, dtype="i4")[None, :]
+
+ relative_position = memory_position - context_position
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=(not self.causal),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, :, :, :]
+ return values
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.inner_dim,))
+
+ @nn.compact
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+        states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = jax.lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = jax.lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions
+ # that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
+ def _create_position_bias(
+ self, key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift
+ ):
+ cache_is_filled = self.causal and self.has_variable("cache", "cached_key") and (not init_cache)
+ key_length = key_states.shape[1]
+ query_length = key_length if cache_is_filled else query_states.shape[1]
+
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(query_length, key_length)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, self.n_heads, query_length, key_length), dtype=self.dtype)
+
+ # if key and values are already calculated, only the last query position bias should be taken
+ if cache_is_filled:
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ position_bias = jax.lax.dynamic_slice(
+ position_bias,
+ (0, 0, causal_attention_mask_shift, 0),
+ (1, self.n_heads, seq_length, max_decoder_length),
+ )
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ use_cache=False,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # q, k, v projections
+        query_states = self.q(hidden_states)  # (batch_size, seq_length, n_heads * head_dim)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # counter-act scaling in dot_product_attention_weights function
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ # for fast decoding causal attention mask should be shifted
+ causal_attention_mask_shift = (
+ self.variables["cache"]["cache_index"] if (self.has_variable("cache", "cached_key") and self.causal) else 0
+ )
+ # create causal attention_mask; attention_mask has to be defined when model is causal
+ if self.causal:
+ causal_attention_mask = make_causal_mask(attention_mask, dtype="bool")
+
+ # fast decoding for generate requires special attention_mask
+ if self.has_variable("cache", "cached_key"):
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_attention_mask = jax.lax.dynamic_slice(
+ causal_attention_mask,
+ (0, 0, causal_attention_mask_shift, 0),
+ (1, 1, seq_length, max_decoder_length),
+ )
+
+ # broadcast causal attention mask & attention mask to fit for merge
+ causal_attention_mask = jnp.broadcast_to(
+ causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:]
+ )
+ attention_mask = jnp.broadcast_to(
+ jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape
+ )
+ attention_mask = combine_masks(attention_mask, causal_attention_mask)
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+        # replace masked positions with the lowest finite value of the computation dtype
+ if attention_mask is not None:
+ mask_value = jnp.finfo(self.dtype).min
+ attention_mask = jax.lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, mask_value).astype(self.dtype),
+ )
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(
+ key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift
+ )
+
+ if attention_mask is not None:
+ position_bias = position_bias + attention_mask
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5LocalAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.local_radius = self.config.local_radius
+ self.block_len = self.local_radius + 1
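+        # (editorial note) each block spans local_radius + 1 tokens; combined with the 3-block
+        # key/value window and the locality mask, every token attends to at most `local_radius`
+        # neighbours on each side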
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ memory_position = jnp.arange(3 * block_length, dtype="i4")
+ context_position = memory_position[block_length:-block_length]
+
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, None, :, :, :]
+ return values
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[0], -1, self.inner_dim)
+
+ def _create_position_bias(self, block_len: int, attention_mask: Optional[np.ndarray]) -> np.ndarray:
+        # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(block_len)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, 1, self.n_heads, block_len, 3 * block_len), dtype=self.dtype)
+
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # q, k, v projections
+        query_states = self.q(hidden_states)  # (batch_size, seq_length, n_heads * head_dim)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, head_dim)
+ query_states = _split_into_blocks(query_states, self.block_len, axis=1)
+ key_states = _split_into_blocks(key_states, self.block_len, axis=1)
+ value_states = _split_into_blocks(value_states, self.block_len, axis=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_axis=1, sequence_axis=2)
+ value_states = _concatenate_3_blocks(value_states, block_axis=1, sequence_axis=2)
+
+ # counter-act scaling in dot_product_attention_weights function
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ if attention_mask is not None:
+ attention_mask = _get_local_attention_mask(attention_mask, self.block_len)
+
+            # replace masked positions with a large negative value (-1e10)
+ attention_mask = jax.lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, -1e10).astype(self.dtype),
+ )
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(self.block_len, attention_mask)
+
+ if attention_mask is not None:
+ position_bias = position_bias + attention_mask.swapaxes(1, 2)
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = attn_output[:, :seq_length, :]
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5TransientGlobalAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.relative_attention_num_buckets = self.config.relative_attention_num_buckets
+ self.relative_attention_max_distance = self.config.relative_attention_max_distance
+ self.d_model = self.config.d_model
+ self.key_value_proj_dim = self.config.d_kv
+ self.n_heads = self.config.num_heads
+ self.local_radius = self.config.local_radius
+ self.block_len = self.local_radius + 1
+ self.global_block_size = self.config.global_block_size
+ self.dropout = self.config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ q_init_std = self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
+ kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+ o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5)
+
+ self.q = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(q_init_std),
+ dtype=self.dtype,
+ )
+ self.k = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.v = nn.Dense(
+ self.inner_dim,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(kv_init_std),
+ dtype=self.dtype,
+ )
+ self.o = nn.Dense(
+ self.d_model,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(o_init_std),
+ dtype=self.dtype,
+ )
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+
+        # Relative attention bias & layer norm for global attention
+ if self.has_relative_attention_bias:
+ self.global_relative_attention_bias = nn.Embed(
+ self.relative_attention_num_buckets,
+ self.n_heads,
+ embedding_init=jax.nn.initializers.normal(kv_init_std),
+ )
+ self.global_input_layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0) * num_buckets
+ relative_position = jnp.abs(relative_position)
+ else:
+ relative_position = -jnp.clip(relative_position, a_max=0)
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact)
+ )
+ relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1)
+
+ relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large)
+
+ return relative_buckets.astype("i4")
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ memory_position = jnp.arange(3 * block_length, dtype="i4")
+ context_position = memory_position[block_length:-block_length]
+
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+
+ values = self.relative_attention_bias(relative_position_bucket)
+ values = values.transpose((2, 0, 1))[None, None, :, :, :]
+ return values
+
+ def compute_side_bias(self, attention_mask: np.ndarray, global_segment_ids: np.ndarray) -> np.ndarray:
+        # (batch_size, 1, seq_len, global_seq_len)
+ side_attention_mask = jnp.equal(attention_mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
+ attention_side_bias = jax.lax.select(
+ side_attention_mask > 0,
+ jnp.full(side_attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(side_attention_mask.shape, -1e10).astype(self.dtype),
+ )
+ # (batch_size, seq_len, global_seq_len)
+ side_relative_position = _make_side_relative_position_ids(attention_mask, self.global_block_size)
+ side_relative_position_bucket = self._relative_position_bucket(
+ side_relative_position,
+ bidirectional=True,
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (batch_size, seq_len, global_seq_len, num_heads)
+ side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
+
+        # (batch_size, num_heads, seq_len, global_seq_len)
+ side_bias = jnp.transpose(side_bias, (0, 3, 1, 2))
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ attention_side_bias = attention_side_bias + side_bias
+ return attention_side_bias
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[0], -1, self.inner_dim)
+
+ def _create_position_bias(self, block_len: int, attention_mask: Optional[np.ndarray]) -> np.ndarray:
+        # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if self.has_relative_attention_bias:
+ position_bias = self.compute_bias(block_len)
+ elif attention_mask is not None:
+ position_bias = jnp.zeros_like(attention_mask)
+ else:
+ position_bias = jnp.zeros((1, 1, self.n_heads, block_len, 3 * block_len), dtype=self.dtype)
+
+ return position_bias
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ key_value_states=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # Prepare components for transient-global attention
+ # Obtain block_ids and global_segment_ids
+ # global_seq_len := seq_len // self.global_block_size
+ # shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(
+ attention_mask if attention_mask is not None else jnp.ones((batch_size, seq_length)),
+ self.global_block_size,
+ )
+ # Create global inputs
+ _global_seq_len = global_segment_ids.shape[-1]
+ global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
+ global_inputs = self.global_input_layer_norm(global_inputs)
+
+ # q, k, v projections
+        query_states = self.q(hidden_states)  # (batch_size, seq_length, n_heads * head_dim)
+ key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states)
+ value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states)
+
+ # reshape to (batch_size, seq_length, n_heads, head_dim)
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # Get global/side key/value_states
+ side_key_states = self.k(global_inputs)
+ side_value_states = self.v(global_inputs)
+
+ # reshape to (batch_size, global_seq_len, n_heads, head_dim)
+ side_key_states = self._split_heads(side_key_states)
+ side_value_states = self._split_heads(side_value_states)
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, head_dim)
+ query_states = _split_into_blocks(query_states, self.block_len, axis=1)
+ key_states = _split_into_blocks(key_states, self.block_len, axis=1)
+ value_states = _split_into_blocks(value_states, self.block_len, axis=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_axis=1, sequence_axis=2)
+ value_states = _concatenate_3_blocks(value_states, block_axis=1, sequence_axis=2)
+
+ # Tile side inputs across local key/value blocks
+ # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
+ reps = [1] * (side_key_states.ndim + 1)
+ reps[1] = key_states.shape[1]
+ side_key_states = jnp.tile(side_key_states[:, None, ...], reps)
+ side_value_states = jnp.tile(side_value_states[:, None, ...], reps)
+
+ # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
+ # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
+ key_states = jnp.concatenate((key_states, side_key_states), axis=2)
+ value_states = jnp.concatenate((value_states, side_value_states), axis=2)
+
+ # counter-act scaling in dot_product_attention_weights function
+ query_states *= jnp.sqrt(query_states.shape[-1])
+
+ if attention_mask is not None:
+ local_attention_mask = _get_local_attention_mask(attention_mask, self.block_len)
+ local_attention_mask = jax.lax.select(
+ local_attention_mask > 0,
+ jnp.full(local_attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(local_attention_mask.shape, -1e10).astype(self.dtype),
+ )
+ else:
+ local_attention_mask = None
+
+ if position_bias is None:
+ # compute position bias (only for first layer)
+ position_bias = self._create_position_bias(self.block_len, attention_mask)
+ if local_attention_mask is not None:
+ position_bias = position_bias + local_attention_mask.swapaxes(1, 2)
+
+        # Calculate global/side bias - shape: (batch_size, num_heads, seq_len, global_seq_len)
+ if attention_mask is None:
+ attention_mask = jnp.ones((batch_size, seq_length))
+ side_position_bias = self.compute_side_bias(attention_mask, global_segment_ids)
+ side_position_bias = _split_into_blocks(side_position_bias, self.block_len, axis=-2)
+ side_position_bias = jnp.swapaxes(side_position_bias, 1, 2)
+ position_bias = jnp.concatenate((position_bias, side_position_bias), axis=-1)
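+        # (editorial note) the resulting bias has shape
+        # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len),
+        # matching the key/value states that were extended with the global aggregates above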
+
+ # create dropout rng
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # Softmax(QK^T)
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=position_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ )
+
+ # multiply with value states
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+
+ # bring back to (batch_size, seq_length, d_model)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = attn_output[:, :seq_length, :]
+
+ # apply output matrix
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+
+ return outputs
+
+
+class FlaxLongT5LayerLocalSelfAttention(nn.Module):
+ """Local self attention used in encoder"""
+
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.LocalSelfAttention = FlaxLongT5LocalAttention(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ **kwargs: Any, # to accept init_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.LocalSelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class FlaxLongT5LayerTransientGlobalSelfAttention(nn.Module):
+ """Transient-Global self attention used in encoder"""
+
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.TransientGlobalSelfAttention = FlaxLongT5TransientGlobalAttention(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ **kwargs: Any, # to accept init_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.TransientGlobalSelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerSelfAttention with T5->LongT5
+class FlaxLongT5LayerSelfAttention(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.SelfAttention = FlaxLongT5Attention(
+ self.config,
+ has_relative_attention_bias=self.has_relative_attention_bias,
+ causal=self.config.causal,
+ dtype=self.dtype,
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.SelfAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerCrossAttention with T5->LongT5
+class FlaxLongT5LayerCrossAttention(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.EncDecAttention = FlaxLongT5Attention(
+ self.config, has_relative_attention_bias=False, causal=False, dtype=self.dtype
+ )
+ self.layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ hidden_states,
+ key_value_states,
+ attention_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.EncDecAttention(
+ normed_hidden_states,
+ attention_mask=attention_mask,
+ key_value_states=key_value_states,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic)
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class FlaxLongT5Block(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.causal = self.config.causal
+ if self.causal:
+ attention_layer = FlaxLongT5LayerSelfAttention
+ elif self.config.encoder_attention_type == "local":
+ attention_layer = FlaxLongT5LayerLocalSelfAttention
+ elif self.config.encoder_attention_type == "transient-global":
+ attention_layer = FlaxLongT5LayerTransientGlobalSelfAttention
+ else:
+ raise ValueError(
+ "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, "
+ f"but got {self.config.encoder_attention_type}."
+ )
+ self.layer = (
+ attention_layer(
+ self.config,
+ has_relative_attention_bias=self.has_relative_attention_bias,
+ name=str(0),
+ dtype=self.dtype,
+ ),
+ )
+ feed_forward_index = 1
+ if self.causal:
+ self.layer += (FlaxLongT5LayerCrossAttention(self.config, name=str(1), dtype=self.dtype),)
+ feed_forward_index += 1
+
+ self.layer += (FlaxLongT5LayerFF(self.config, name=str(feed_forward_index), dtype=self.dtype),)
+
+ # Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Block.__call__ with T5->LongT5
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ output_attentions=False,
+ return_dict=True,
+ deterministic=True,
+ init_cache=False,
+ ):
+ self_attention_outputs = self.layer[0](
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+ hidden_states = self_attention_outputs[0]
+ attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights
+
+ do_cross_attention = self.causal and encoder_hidden_states is not None
+ if do_cross_attention:
+ cross_attention_outputs = self.layer[1](
+ hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ position_bias=encoder_decoder_position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = cross_attention_outputs[0]
+
+ # Keep cross-attention outputs and relative position weights
+ attention_outputs = attention_outputs + cross_attention_outputs[1:]
+
+ # Apply Feed Forward layer
+ hidden_states = self.layer[-1](hidden_states, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ outputs = outputs + attention_outputs
+
+ # returns hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5LayerCollection with T5->LongT5
+class FlaxLongT5LayerCollection(nn.Module):
+ config: LongT5Config
+ has_relative_attention_bias: bool
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layer = FlaxLongT5Block(
+ self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype
+ )
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ output_attentions=False,
+ deterministic=True,
+ init_cache=False,
+ ):
+ return self.layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5BlockCollection with T5->LongT5
+class FlaxLongT5BlockCollection(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.causal = self.config.causal
+ if self.gradient_checkpointing:
+ FlaxLongT5CheckpointLayer = remat(FlaxLongT5LayerCollection, static_argnums=(6, 7, 8))
+ self.blocks = [
+ FlaxLongT5CheckpointLayer(
+ self.config,
+ has_relative_attention_bias=(i == 0),
+ dtype=self.dtype,
+ name=str(i),
+ )
+ for i in range(self.config.num_layers)
+ ]
+ else:
+ self.blocks = [
+ FlaxLongT5LayerCollection(
+ self.config,
+ has_relative_attention_bias=(i == 0),
+ dtype=self.dtype,
+ name=str(i),
+ )
+ for i in range(self.config.num_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ ):
+ # Prepare head mask if needed
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and self.causal) else None
+ position_bias = None
+ encoder_decoder_position_bias = None
+
+ for i, layer_module in enumerate(self.blocks):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ position_bias,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ encoder_decoder_position_bias,
+ output_attentions,
+ deterministic,
+ init_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ # We share the position biases between the layers - the first layer stores them
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ position_bias = layer_outputs[1]
+
+ if self.causal and encoder_hidden_states is not None:
+ encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[2],)
+ if self.causal:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[4],)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Stack with T5->LongT5
+class FlaxLongT5Stack(nn.Module):
+ config: LongT5Config
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.causal = self.config.causal
+
+ self.block = FlaxLongT5BlockCollection(
+ self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+ self.final_layer_norm = FlaxLongT5LayerNorm(
+ self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype
+ )
+ self.dropout = nn.Dropout(self.config.dropout_rate)
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ ):
+ hidden_states = self.embed_tokens(input_ids)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+
+ outputs = self.block(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+
+ hidden_states = outputs[0]
+
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+
+ # Add last layer
+ all_hidden_states = None
+
+ if output_hidden_states:
+ all_hidden_states = outputs.hidden_states
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ if output_hidden_states:
+ return (
+ hidden_states,
+ all_hidden_states,
+ ) + outputs[2:]
+ return (hidden_states,) + outputs[1:]
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+LONGT5_ENCODE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ To know more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+LONGT5_DECODE_INPUTS_DOCSTRING = r"""
+ Args:
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For training, `decoder_input_ids` should be provided.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+LONGT5_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ To know more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ To know more about how to prepare `decoder_input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(jnp.ndarray))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class FlaxLongT5PreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongT5Config
+ base_model_prefix = "transformer"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: LongT5Config,
+ input_shape: Tuple[int] = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def enable_gradient_checkpointing(self):
+ self._module = self.module_class(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=True,
+ )
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+
+ attention_mask = jnp.ones_like(input_ids)
+ decoder_input_ids = jnp.ones_like(input_ids)
+ decoder_attention_mask = jnp.ones_like(input_ids)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ def __call__(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ decoder_input_ids: jnp.ndarray = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if decoder_input_ids is None:
+ raise ValueError(
+ "Make sure to provide both `input_ids` and `decoder_input_ids`. `decoder_input_ids` is not passed"
+ " here."
+ )
+
+ # prepare encoder inputs
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # prepare decoder inputs
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ )
+
+ def init_cache(self, batch_size, max_length, encoder_outputs):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
+ cross-attention of the decoder.
+ """
+ # init input variables to retrieve cache
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0),
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ init_cache=True,
+ method=_decoder_forward, # we only need to call the decoder to init the cache
+ )
+ return unfreeze(init_variables["cache"])
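+
+ # Illustrative usage sketch (names such as `model` and `encoder_outputs` are assumptions,
+ # not upstream code): the cache returned here is threaded through `decode` as
+ # `past_key_values` during auto-regressive decoding, e.g.
+ # past_key_values = model.init_cache(batch_size=1, max_length=64, encoder_outputs=encoder_outputs)
+ # outputs = model.decode(decoder_input_ids, encoder_outputs, past_key_values=past_key_values)
+ # past_key_values = outputs["past_key_values"] # updated cache for the next step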
+
+ @add_start_docstrings(LONGT5_ENCODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=LongT5Config)
+ def encode(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ def _encoder_forward(module, input_ids, attention_mask, **kwargs):
+ encode_module = module._get_encoder_module()
+ return encode_module(input_ids, attention_mask, **kwargs)
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ method=_encoder_forward,
+ )
+
+ @add_start_docstrings(LONGT5_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=LongT5Config)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+ >>> import jax.numpy as jnp
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, the cache is already initialized, so a private flag init_cache has to be
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
+ # changed by the FlaxLongT5Attention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past = outputs
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past = outputs
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+
+LONGT5_START_DOCSTRING = r"""
+ The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long
+ Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo
+ Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising
+ generative setting. The LongT5 model is an extension of the T5 model, and it enables using one of two different
+ efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.).
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`LongT5Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified, all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+
+@add_start_docstrings(
+ "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Module with T5->LongT5
+class FlaxLongT5Module(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def setup(self):
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.initializer_factor * 1.0),
+ dtype=self.dtype,
+ )
+
+ encoder_config = copy.deepcopy(self.config)
+ encoder_config.causal = False
+ self.encoder = FlaxLongT5Stack(
+ encoder_config,
+ embed_tokens=self.shared,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+
+ decoder_config = copy.deepcopy(self.config)
+ decoder_config.causal = True
+ decoder_config.num_layers = self.config.num_decoder_layers
+ self.decoder = FlaxLongT5Stack(
+ decoder_config,
+ embed_tokens=self.shared,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ encoder_outputs=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ deterministic: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Encode if needed (training, first prediction pass)
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return FlaxSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5Model with T5->LongT5
+class FlaxLongT5Model(FlaxLongT5PreTrainedModel):
+ module_class = FlaxLongT5Module
+
+
+append_call_sample_docstring(FlaxLongT5Model, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
+
+FLAX_LONGT5_MODEL_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5Model
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5Model.from_pretrained("google/long-t5-local-base")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="np"
+ ... ).input_ids
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```
+"""
+
+
+overwrite_call_docstring(FlaxLongT5Model, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_MODEL_DOCSTRING)
+append_replace_return_docstrings(FlaxLongT5Model, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+
+
+@add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING)
+# Copied from transformers.models.t5.modeling_flax_t5.FlaxT5ForConditionalGenerationModule with T5->LongT5
+class FlaxLongT5ForConditionalGenerationModule(nn.Module):
+ config: LongT5Config
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def setup(self):
+ self.model_dim = self.config.d_model
+
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.initializer_factor),
+ dtype=self.dtype,
+ )
+
+ encoder_config = copy.deepcopy(self.config)
+ encoder_config.causal = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = FlaxLongT5Stack(
+ encoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+
+ decoder_config = copy.deepcopy(self.config)
+ decoder_config.causal = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = self.config.num_decoder_layers
+ self.decoder = FlaxLongT5Stack(
+ decoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
+ )
+
+ self.lm_head = nn.Dense(
+ self.config.vocab_size,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_factor),
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ encoder_outputs=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ deterministic: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Encode
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.model_dim**-0.5)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.shared.variables["params"]["embedding"]
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output)
+ else:
+ lm_logits = self.lm_head(sequence_output)
+
+ if not return_dict:
+ return (lm_logits,) + decoder_outputs[1:] + encoder_outputs
+
+ return FlaxSeq2SeqLMOutput(
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+class FlaxLongT5ForConditionalGeneration(FlaxLongT5PreTrainedModel):
+ module_class = FlaxLongT5ForConditionalGenerationModule
+
+ @add_start_docstrings(LONGT5_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=LongT5Config)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+ >>> import jax.numpy as jnp
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> text = "summarize: My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, the cache is already initialized, so a private flag init_cache has to be
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
+ # changed by the FlaxLongT5Attention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
+ decoder_module = module._get_decoder_module()
+ decoder_outputs = decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ **kwargs,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.config.d_model**-0.5)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = module.shared.variables["params"]["embedding"]
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output)
+ else:
+ lm_logits = module.lm_head(sequence_output)
+
+ return lm_logits, decoder_outputs
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ if past_key_values is None:
+ lm_logits, decoder_outputs = outputs
+ else:
+ (lm_logits, decoder_outputs), past = outputs
+
+ if return_dict:
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
+ logits=lm_logits,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ )
+ else:
+ outputs = (lm_logits,) + decoder_outputs[1:]
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ max_length,
+ attention_mask: Optional[jax.Array] = None,
+ decoder_attention_mask: Optional[jax.Array] = None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # initializing the cache
+ batch_size, seq_length = decoder_input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+ # But since the decoder uses a causal mask, those positions are masked anyways.
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if decoder_attention_mask is not None:
+ extended_attention_mask = jax.lax.dynamic_update_slice(
+ extended_attention_mask, decoder_attention_mask, (0, 0)
+ )
+
+ return {
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "encoder_attention_mask": attention_mask,
+ "decoder_attention_mask": extended_attention_mask,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ return model_kwargs
+
+
+FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
+ >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base")
+
+ >>> ARTICLE_TO_SUMMARIZE = "summarize: My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors="np")
+
+ >>> # Generate Summary
+ >>> summary_ids = model.generate(inputs["input_ids"]).sequences
+ >>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False))
+ ```
+"""
+
+
+overwrite_call_docstring(
+ FlaxLongT5ForConditionalGeneration, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING
+)
+append_replace_return_docstrings(
+ FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
+)
+
+
+__all__ = ["FlaxLongT5ForConditionalGeneration", "FlaxLongT5Model", "FlaxLongT5PreTrainedModel"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..c85c282439fe55db6141b3f3305c4ac199f4f615
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py
@@ -0,0 +1,2344 @@
+# coding=utf-8
+# Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch LongT5 model."""
+
+import copy
+import math
+import warnings
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache, StaticCache
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import ALL_LAYERNORM_LAYERS, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ DUMMY_INPUTS,
+ DUMMY_MASK,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_torch_fx_proxy,
+ is_torchdynamo_compiling,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_longt5 import LongT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "LongT5Config"
+_CHECKPOINT_FOR_DOC = "google/long-t5-local-base"
+
+# TODO: Update before the merge
+
+
+def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor:
+ """Pad a tensor so that its sequence length will be a multiple of `block_len`."""
+ pad_len = -x.shape[dim] % block_len
+ # Handle cases when an empty input sequence is given
+ if not all(x.shape):
+ new_shape = list(x.shape)
+ new_shape[dim] += pad_len
+ return torch.zeros(new_shape, dtype=x.dtype)
+
+ pad = [(0, 0)] * x.ndim
+ pad[dim] = (0, pad_len)
+ pad = sum(pad[::-1], ())
+ x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
+ return x
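+
+# Illustrative sketch of the padding arithmetic above (example values are an assumption,
+# not upstream code): for x of shape (1, 10, 8) with block_len=4 and dim=1,
+# pad_len = -10 % 4 = 2, so the returned tensor has shape (1, 12, 8) with two zero-padded
+# positions appended along dim 1.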
+
+
+def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor:
+ """Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length
+ is not a multiple of `block_len`, it will be padded first with selected `pad_value`.
+ """
+ # pad tensor to multiple of block_len
+ if x.shape[dim] % block_len != 0:
+ x = _pad_to_multiple(x, block_len, dim, pad_value=0)
+ num_blocks = x.shape[dim] // block_len
+ output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :]
+ # If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion
+ if 0 in output_shape:
+ return torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ return x.reshape(output_shape)
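+
+# Illustrative sketch (assumed shapes, not upstream code): for x of shape (1, 12, 8) with
+# block_len=4 and dim=1, num_blocks = 3 and the result has shape (1, 3, 4, 8), i.e. the
+# sequence axis is split into (num_blocks, block_len).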
+
+
+def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor:
+ """Concatenate three consecutive blocks for each input block for local attention.
+
+ For more information, see: https://arxiv.org/pdf/2112.07916.pdf.
+ """
+ num_blocks = x.shape[block_dim]
+
+ pad = [(0, 0)] * x.ndim
+ pad[block_dim] = (1, 1)
+ pad = sum(pad[::-1], ())
+ # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]
+ x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value)
+
+ blocks_list: List[torch.Tensor] = []
+ for i in range(3):
+ # We use indexing approach here:
+ # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs
+ indices = [slice(0, None)] * x.ndim
+ indices[block_dim] = slice(i, i + num_blocks)
+ indices = tuple(indices)
+ blocks_list.append(x[indices])
+ # [batch_size, num_blocks, 3 * block_len, ...]
+ return torch.cat(blocks_list, dim=sequence_dim)
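+
+# Illustrative sketch (assumed shapes, not upstream code): for blocked states of shape
+# (batch, num_blocks, block_len, dim) with block_dim=1 and sequence_dim=2, the input is
+# first padded to num_blocks + 2 blocks and each block is then concatenated with its left
+# and right neighbours, giving shape (batch, num_blocks, 3 * block_len, dim).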
+
+
+def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor:
+ """Makes 3-blocked relative position ids for local attention."""
+ position_ids = torch.arange(3 * block_len, dtype=torch.int32)
+ center_position_ids = position_ids[block_len:-block_len]
+ # [block_len, 3 * block_len]
+ relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1)
+ return relative_position_ids
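+
+# Illustrative sketch (block_len=2 chosen only for the example): position_ids is
+# [0, 1, 2, 3, 4, 5] and center_position_ids is [2, 3], so the returned
+# (block_len, 3 * block_len) tensor is
+# [[-2, -1, 0, 1, 2, 3],
+#  [-3, -2, -1, 0, 1, 2]],
+# i.e. the offset of every position in the 3-block window from each center-block position.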
+
+
+def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor:
+ """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius``."""
+ relative_position_ids = _make_3block_relative_position_ids(block_len)
+ locality_mask = torch.abs(relative_position_ids) < block_len
+ locality_mask = locality_mask[None, None, :, :]
+ locality_mask = locality_mask.to(local_attention_mask.device)
+ return torch.logical_and(local_attention_mask, locality_mask)
+
+
+def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor:
+ """Prepare attention mask to be applied for a local attention."""
+ # [batch_size, num_blocks, block_len]
+ _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1)
+ # [batch_size, num_block, 3 * block_len]
+ _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2)
+
+ _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1)
+ _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2)
+ # [batch_size, num_block, block_len, 3 * block_len]
+ local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask)
+ local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)
+ # [batch_size, 1, num_block, block_len, 3 * block_len]
+ return local_attention_mask.unsqueeze(1).to(device)
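+
+# Illustrative shape flow (assumed values, not upstream code): for an attention_mask of
+# shape (batch, 12) with block_len=4, the blocked mask is (batch, 3, 4), its 3-block
+# concatenation is (batch, 3, 12), and the returned local mask has shape
+# (batch, 1, 3, 4, 12) after positions farther than block_len away have been masked out.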
+
+
+def _make_global_fixed_block_ids(
+ attention_mask: torch.Tensor, global_block_size: int
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Obtain the "fixed block" global id corresponding to each input token.
+
+ This implementation is a simplified version of the original Flaxformer implementation adapted from:
+ https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.
+
+ In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make
+ up a whole fixed block, are assigned to the preceding block.
+
+ Padding tokens from the original sequence are represented by -1.
+ """
+ batch_size, seq_len = attention_mask.shape[:2]
+
+ def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor:
+ block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1
+ block_ends = block_ends.to(block_ids.device)
+ true_block_ends = torch.logical_and(block_ends, block_ids >= 0)
+ full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1
+ block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks)
+ return block_ids
+
+ fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size
+ fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask
+ mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype)
+ global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype)
+ _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device)
+ global_block_ids = torch.where(
+ global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound
+ )
+ # set padding tokens to -1
+ global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)
+ # [batch_size, seq_len]
+ global_block_ids = handle_orphan_tokens(global_block_ids)
+ num_globals = seq_len // global_block_size
+ # [batch_size, seq_len // global_block_size]
+ if num_globals > 0:
+ _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1)
+ else:
+ _sequence_block_ids_max = torch.zeros(
+ batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device
+ )
+ global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1
+ global_segment_ids = global_segment_ids.to(attention_mask.device)
+ global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)
+ return global_block_ids.type(torch.int), global_segment_ids.type(torch.int)
+
+
+def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor:
+ """Create the relative position tensor for local -> global attention."""
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)
+ global_seq_len = global_segment_ids.shape[-1]
+ global_positions = torch.arange(global_seq_len, device=block_ids.device)
+ side_relative_position = global_positions - block_ids[..., None]
+ return side_relative_position.type(torch.int64)
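+
+# Illustrative sketch (assumed values, not upstream code): with global_block_size=2 and a
+# fully unmasked sequence of length 4, block_ids is [0, 0, 1, 1] and there are 2 global
+# blocks, so the side relative positions per token are
+# [[0, 1], [0, 1], [-1, 0], [-1, 0]],
+# i.e. the signed distance from each token's block to every global block.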
+
+
+def _create_global_aggregates(
+ hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int
+) -> torch.Tensor:
+ """Compute individual block aggregates by summing over individual blocks."""
+ # (batch..., seq_len, global_seq_len))
+ block_ids = block_ids.where(
+ block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device)
+ )
+ one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1]
+ return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype))
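+
+# Illustrative sketch (assumed values, not upstream code): with block_ids = [0, 0, 1] and
+# global_seq_len = 2, the one-hot matrix routes the first two token vectors to global
+# position 0 and the third to global position 1, so the einsum reduces (batch, 3, d)
+# hidden states to (batch, 2, d) per-block sums; padding tokens (block_id == -1) are
+# mapped to the extra one-hot column that is sliced off.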
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5
+class LongT5LayerNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean.
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ # LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
+ # Square Layer Normalization https://arxiv.org/abs/1910.07467, thus variance is calculated
+ # without mean and there is no bias. Additionally we want to make sure that the accumulation for
+ # half-precision inputs is done in fp32
+
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+ # convert into half-precision if necessary
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
+ hidden_states = hidden_states.to(self.weight.dtype)
+
+ return self.weight * hidden_states
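+
+ # In formula form (derived from the code above, not taken from upstream docs):
+ # y = weight * x / sqrt(mean(x ** 2, dim=-1) + eps)
+ # i.e. only a learned rescaling of RMS-normalized activations, with no mean subtraction
+ # and no bias term.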
+
+
+try:
+ from apex.normalization import FusedRMSNorm
+
+ LongT5LayerNorm = FusedRMSNorm # noqa
+
+ logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm")
+except ImportError:
+ # using the normal LongT5LayerNorm
+ pass
+except Exception:
+ logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm")
+ pass
+
+ALL_LAYERNORM_LAYERS.append(LongT5LayerNorm)
+
+
+# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5
+class LongT5DenseActDense(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN[config.dense_act_fn]
+
+ def forward(self, hidden_states):
+ hidden_states = self.wi(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ if (
+ isinstance(self.wo.weight, torch.Tensor)
+ and hidden_states.dtype != self.wo.weight.dtype
+ and self.wo.weight.dtype != torch.int8
+ ):
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+class LongT5DenseGatedActDense(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN[config.dense_act_fn]
+
+ def forward(self, hidden_states):
+ hidden_gelu = self.act(self.wi_0(hidden_states))
+ hidden_linear = self.wi_1(hidden_states)
+ hidden_states = hidden_gelu * hidden_linear
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
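+
+ # In formula form (derived from the code above): FF(x) = dropout(act(x @ W_i0) * (x @ W_i1)) @ W_o,
+ # i.e. one projection is passed through the activation and gates a second, purely linear
+ # projection element-wise before the output projection is applied.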
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5
+class LongT5LayerFF(nn.Module):
+ def __init__(self, config: LongT5Config):
+ super().__init__()
+ if config.is_gated_act:
+ self.DenseReluDense = LongT5DenseGatedActDense(config)
+ else:
+ self.DenseReluDense = LongT5DenseActDense(config)
+
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(self, hidden_states):
+ forwarded_states = self.layer_norm(hidden_states)
+ forwarded_states = self.DenseReluDense(forwarded_states)
+ hidden_states = hidden_states + self.dropout(forwarded_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5
+class LongT5Attention(nn.Module):
+ def __init__(
+ self,
+ config: LongT5Config,
+ has_relative_attention_bias=False,
+ layer_idx: Optional[int] = None,
+ ):
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+ self.layer_idx = layer_idx
+ if layer_idx is None and self.is_decoder:
+ logger.warning_once(
+ f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
+ "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+ self.gradient_checkpointing = False
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on.
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
+
+ def compute_bias(self, query_length, key_length, device=None, cache_position=None):
+ """Compute binned relative position bias"""
+ if device is None:
+ device = self.relative_attention_bias.weight.device
+ if cache_position is None:
+ context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
+ else:
+ context_position = cache_position[:, None].to(device)
+ memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
+ relative_position = memory_position - context_position # shape (query_length, key_length)
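+ # e.g. during incremental decoding with cache_position=[5] and key_length=6, context_position is [[5]],
+ # so relative_position is [[-5, -4, -3, -2, -1, 0]] (shape (1, key_length))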
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # shape (query_length, key_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
+ values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
+ return values
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ key_value_states=None,
+ position_bias=None,
+ past_key_value=None,
+ layer_head_mask=None,
+ query_length=None,
+ use_cache=False,
+ output_attentions=False,
+ cache_position=None,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ # Input is (batch_size, seq_length, dim)
+ # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder)
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # if key_value_states are provided this layer is used as a cross-attention layer for the decoder
+ is_cross_attention = key_value_states is not None
+
+ query_states = self.q(hidden_states)
+ query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+
+ if past_key_value is not None:
+ is_updated = past_key_value.is_updated.get(self.layer_idx)
+ if is_cross_attention:
+ # after the first generated id, we can subsequently re-use all key/value_states from cache
+ curr_past_key_value = past_key_value.cross_attention_cache
+ else:
+ curr_past_key_value = past_key_value.self_attention_cache
+
+ current_states = key_value_states if is_cross_attention else hidden_states
+ if is_cross_attention and past_key_value is not None and is_updated:
+ # reuse k,v, cross_attentions
+ key_states = curr_past_key_value.key_cache[self.layer_idx]
+ value_states = curr_past_key_value.value_cache[self.layer_idx]
+ else:
+ key_states = self.k(current_states)
+ value_states = self.v(current_states)
+ key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+ value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+
+ if past_key_value is not None:
+ # save all key/value_states to cache to be re-used for fast auto-regressive generation
+ cache_position = cache_position if not is_cross_attention else None
+ key_states, value_states = curr_past_key_value.update(
+ key_states, value_states, self.layer_idx, {"cache_position": cache_position}
+ )
+ # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
+ if is_cross_attention:
+ past_key_value.is_updated[self.layer_idx] = True
+
+ # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
+ scores = torch.matmul(query_states, key_states.transpose(3, 2))
+
+ if position_bias is None:
+ key_length = key_states.shape[-2]
+ # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
+ real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(
+ real_seq_length, key_length, device=scores.device, cache_position=cache_position
+ )
+ position_bias = position_bias[:, :, -seq_length:, :]
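+ # only the rows for the current queries are kept; e.g. during cached decoding seq_length is typically 1,
+ # so this slices out the last row of the (1, n_heads, real_seq_length, key_length) bias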
+
+ if mask is not None:
+ causal_mask = mask[:, :, :, : key_states.shape[-2]]
+ position_bias = position_bias + causal_mask
+
+ if self.pruned_heads:
+ mask = torch.ones(position_bias.shape[1])
+ mask[list(self.pruned_heads)] = 0
+ position_bias_masked = position_bias[:, mask.bool()]
+ else:
+ position_bias_masked = position_bias
+
+ scores += position_bias_masked
+
+ # (batch_size, n_heads, seq_length, key_length)
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(batch_size, -1, self.inner_dim)
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, past_key_value, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
+
+class LongT5LocalAttention(nn.Module):
+ def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+ self.gradient_checkpointing = False
+
+ # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ target_device = (
+ self.relative_attention_bias.weight.device
+ if self.relative_attention_bias.weight.device.type != "meta"
+ else None
+ )
+ memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device)
+ context_position = memory_position[block_length:-block_length]
+
+ # (block_length, 3 * block_length)
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # (block_length, 3 * block_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (block_length, 3 * block_length, num_heads)
+ values = self.relative_attention_bias(relative_position_bucket)
+ # (1, 1, num_heads, block_length, 3 * block_length)
+ values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
+ return values
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ ):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ def shape(states):
+ """projection"""
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
+
+ def unshape(states):
+ """reshape"""
+ return states.contiguous().view(batch_size, -1, self.inner_dim)
+
+ # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head)
+ query_states = shape(self.q(hidden_states))
+ key_states = shape(self.k(hidden_states))
+ value_states = shape(self.v(hidden_states))
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
+ query_states = _split_into_blocks(query_states, self.block_len, dim=1)
+ key_states = _split_into_blocks(key_states, self.block_len, dim=1)
+ value_states = _split_into_blocks(value_states, self.block_len, dim=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
+ value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
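+ # Illustrative sizing, assuming the local_radius=127 / block_len=128 setting from the LongT5 paper:
+ # a 1024-token sequence is split into 8 blocks, and each query block attends to its own keys plus the
+ # neighbouring block on each side, i.e. 3 * block_len = 384 key/value positions per block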
+
+ # Compute scores
+ scores = torch.einsum(
+ "...qhd,...khd->...hqk", query_states, key_states
+ ) # (batch_size, num_block, n_heads, block_len, 3 * block_len)
+
+ if position_bias is None:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(self.block_len)
+
+ if mask is not None:
+ # Replace masked positions with -1e10 (according to the original implementation)
+ mask = torch.where(mask > 0, 0.0, -1e10)
+ # We need to adjust the position bias shape so it can be summed with the mask
+ position_bias = position_bias + mask.transpose(1, 2)
+
+ scores += position_bias
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+ attn_weights = attn_weights.type(value_states.dtype)
+ attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
+ attn_output = attn_output[:, :seq_length, :]
+ attn_output = self.o(attn_output)
+
+ present_key_value_state = None
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
+
+class LongT5TransientGlobalAttention(nn.Module):
+ def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+ self.global_block_size = config.global_block_size
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+
+ # Relative attention bias & layer norm for global attention
+ if self.has_relative_attention_bias:
+ self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+
+ # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
+
+ def compute_bias(self, block_length: int):
+ """Compute binned relative position bias"""
+ target_device = (
+ self.relative_attention_bias.weight.device
+ if self.relative_attention_bias.weight.device.type != "meta"
+ else None
+ )
+ memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device)
+ context_position = memory_position[block_length:-block_length]
+
+ # (block_length, 3 * block_length)
+ relative_position = memory_position[None, :] - context_position[:, None]
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # (block_length, 3 * block_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (block_length, 3 * block_length, num_heads)
+ values = self.relative_attention_bias(relative_position_bucket)
+ # (1, 1, num_heads, block_length, 3 * block_length)
+ values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)
+ return values
+
+ def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor:
+ # (batch_size, 1, seq_len, global_seq_len)
+ side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...]
+ attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10)
+ # (batch_size, seq_len, global_seq_len)
+ side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size)
+ side_relative_position_bucket = self._relative_position_bucket(
+ side_relative_position,
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ # (batch_size, seq_len, global_seq_len, num_heads)
+ side_bias = self.global_relative_attention_bias(side_relative_position_bucket)
+
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ side_bias = side_bias.permute([0, 3, 1, 2])
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ attention_side_bias = attention_side_bias + side_bias
+ return attention_side_bias
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ ):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ def shape(states):
+ """projection"""
+ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)
+
+ def unshape(states):
+ """reshape"""
+ return states.contiguous().view(batch_size, -1, self.inner_dim)
+
+ # Prepare components for transient-global attention
+ # Obtain block_ids and global_segment_ids
+ # global_seq_len := seq_len // self.global_block_size
+ # shapes: (batch_size, seq_len) & (batch_size, global_seq_len)
+ block_ids, global_segment_ids = _make_global_fixed_block_ids(
+ mask if mask is not None else torch.ones(hidden_states.shape[:-1]),
+ self.global_block_size,
+ )
+ # Create global inputs
+ _global_seq_len = global_segment_ids.shape[-1]
+ global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)
+ global_inputs = self.global_input_layer_norm(global_inputs)
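+ # Illustrative sizing, assuming global_block_size=16: a 1024-token sequence yields
+ # global_seq_len = 1024 // 16 = 64 side/global tokens, each an aggregate of one fixed
+ # block of inputs (aggregated and layer-normed above)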
+
+ # get query states -> (batch_size, seq_length, n_heads, dim_per_head)
+ query_states = shape(self.q(hidden_states))
+ key_states = shape(self.k(hidden_states))
+ value_states = shape(self.v(hidden_states))
+ # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head)
+ side_key_states = shape(self.k(global_inputs))
+ side_value_states = shape(self.v(global_inputs))
+
+ # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)
+ query_states = _split_into_blocks(query_states, self.block_len, dim=1)
+ key_states = _split_into_blocks(key_states, self.block_len, dim=1)
+ value_states = _split_into_blocks(value_states, self.block_len, dim=1)
+
+ # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head)
+ key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)
+ value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)
+
+ # Tile side inputs across local key/value blocks
+ # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)
+ reps = [1] * (side_key_states.ndim + 1)
+ reps[1] = key_states.shape[1]
+ side_key_states = side_key_states.unsqueeze(1).repeat(reps)
+ side_value_states = side_value_states.unsqueeze(1).repeat(reps)
+
+ # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones
+ # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head)
+ key_states = torch.cat([key_states, side_key_states], dim=2)
+ value_states = torch.cat([value_states, side_value_states], dim=2)
+
+ # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len)
+ scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states)
+
+ if mask is not None:
+ # We need to adjust the position bias shape so it can be summed with the mask
+ local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device)
+ # Replace masked positions with -1e10 (according to the original implementation)
+ local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10)
+ else:
+ local_attention_mask = None
+
+ if position_bias is None:
+ # position_bias shape: (1, 1, n_heads, block_len, 3 * block_len)
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, 1, self.n_heads, self.block_len, 3 * self.block_len),
+ device=scores.device,
+ dtype=scores.dtype,
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(self.block_len)
+
+ if local_attention_mask is not None:
+ # (batch_size, 1, n_heads, block_len, 3 * block_len)
+ position_bias = position_bias + local_attention_mask.transpose(1, 2)
+ position_bias = position_bias.type(scores.dtype)
+
+ # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len)
+ if mask is None:
+ mask = torch.ones(batch_size, seq_length)
+ # (batch_size, num_heads, seq_len, global_seq_len)
+ side_position_bias = self.compute_side_bias(mask, global_segment_ids)
+ # (batch_size, num_blocks, num_heads, block_len, global_seq_len)
+ side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2)
+ side_position_bias = side_position_bias.type(scores.dtype).to(scores.device)
+ # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len)
+ position_bias = torch.cat([position_bias, side_position_bias], dim=-1)
+
+ scores += position_bias
+ # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+ attn_weights = attn_weights.type(value_states.dtype)
+ attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states))
+ attn_output = attn_output[:, :seq_length, :]
+ attn_output = self.o(attn_output)
+
+ present_key_value_state = None
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5
+class LongT5LayerSelfAttention(nn.Module):
+ def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.SelfAttention = LongT5Attention(
+ config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx
+ )
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ output_attentions=False,
+ cache_position=None,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.SelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ cache_position=cache_position,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5LayerLocalSelfAttention(nn.Module):
+ """Local self attention used in encoder"""
+
+ def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias)
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ **kwargs: Any, # to accept past_key_value and use_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.LocalSelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5LayerTransientGlobalSelfAttention(nn.Module):
+ """Transient-Global self attention used in encoder"""
+
+ def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention(
+ config, has_relative_attention_bias=has_relative_attention_bias
+ )
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ output_attentions=False,
+ **kwargs: Any, # to accept past_key_value and use_cache kwargs
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.TransientGlobalSelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5
+class LongT5LayerCrossAttention(nn.Module):
+ def __init__(self, config, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
+ self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ key_value_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ query_length=None,
+ output_attentions=False,
+ cache_position=None,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.EncDecAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ key_value_states=key_value_states,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ query_length=query_length,
+ output_attentions=output_attentions,
+ cache_position=cache_position,
+ )
+ layer_output = hidden_states + self.dropout(attention_output[0])
+ outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+class LongT5Block(nn.Module):
+ def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ if config.is_decoder:
+ attention_layer = LongT5LayerSelfAttention
+ elif config.encoder_attention_type == "local":
+ attention_layer = LongT5LayerLocalSelfAttention
+ elif config.encoder_attention_type == "transient-global":
+ attention_layer = LongT5LayerTransientGlobalSelfAttention
+ else:
+ raise ValueError(
+ "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, "
+ f"but got {config.encoder_attention_type}."
+ )
+ self.layer = nn.ModuleList()
+ self.layer.append(
+ attention_layer(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)
+ )
+ if self.is_decoder:
+ self.layer.append(LongT5LayerCrossAttention(config, layer_idx=layer_idx))
+
+ self.layer.append(LongT5LayerFF(config))
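+ # The resulting ModuleList layout is [self-attention, (cross-attention if decoder), feed-forward],
+ # which is why the forward pass below indexes self.layer[0], self.layer[1] and self.layer[-1]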
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ output_attentions=False,
+ return_dict=True,
+ cache_position=None,
+ ):
+ self_attention_outputs = self.layer[0](
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ cache_position=cache_position,
+ )
+ hidden_states, past_key_value = self_attention_outputs[:2]
+ attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ do_cross_attention = self.is_decoder and encoder_hidden_states is not None
+ if do_cross_attention:
+ cross_attention_outputs = self.layer[1](
+ hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ position_bias=encoder_decoder_position_bias,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_value,
+ query_length=cache_position[-1] + 1,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ cache_position=cache_position,
+ )
+ hidden_states, past_key_value = cross_attention_outputs[:2]
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ # Keep cross-attention outputs and relative position weights
+ attention_outputs = attention_outputs + cross_attention_outputs[2:]
+
+ # Apply Feed Forward layer
+ hidden_states = self.layer[-1](hidden_states)
+
+ # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if use_cache:
+ outputs = outputs + (past_key_value,) + attention_outputs
+ else:
+ outputs = outputs + attention_outputs
+
+ return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
+
+
+class LongT5PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongT5Config
+ base_model_prefix = "transformer"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["LongT5Block"]
+ _supports_cache_class = True
+ _supports_static_cache = False # TODO: @raushan more involved due to local/global attn
+
+ @property
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs
+ def dummy_inputs(self):
+ input_ids = torch.tensor(DUMMY_INPUTS)
+ input_mask = torch.tensor(DUMMY_MASK)
+ dummy_inputs = {
+ "decoder_input_ids": input_ids,
+ "input_ids": input_ids,
+ "decoder_attention_mask": input_mask,
+ }
+ return dummy_inputs
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor # Used for testing weights initialization
+ if isinstance(module, LongT5LayerNorm):
+ module.weight.data.fill_(factor * 1.0)
+ elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)):
+ # Mesh TensorFlow embeddings initialization
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
+ module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ elif isinstance(module, LongT5DenseActDense):
+ # Mesh TensorFlow FF initialization
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
+ module.wi.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, LongT5DenseGatedActDense):
+ module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
+ module.wi_0.bias.data.zero_()
+ module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
+ module.wi_1.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)):
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
+ d_model = self.config.d_model
+ key_value_proj_dim = self.config.d_kv
+ n_heads = self.config.num_heads
+ module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
+ module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
+ if module.has_relative_attention_bias:
+ module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
+ if isinstance(module, LongT5TransientGlobalAttention):
+ module.global_relative_attention_bias.weight.data.normal_(
+ mean=0.0, std=factor * ((d_model) ** -0.5)
+ )
+
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ if decoder_start_token_id is None:
+ raise ValueError(
+ "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id. "
+ "See LongT5 docs for more information."
+ )
+
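+ # Illustrative example (hypothetical token ids): with decoder_start_token_id=0, labels [23, 7, 15]
+ # become decoder_input_ids [0, 23, 7]; any -100 used to mask the loss is then replaced by pad_token_id below
+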
+ # shift inputs to the right
+ if is_torch_fx_proxy(input_ids):
+ # Item assignment is not supported natively for proxies.
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
+ else:
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
+
+class LongT5Stack(LongT5PreTrainedModel):
+ def __init__(self, config, embed_tokens=None):
+ super().__init__(config)
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+ self.is_decoder = config.is_decoder
+
+ self.local_radius = config.local_radius
+ self.block_len = self.local_radius + 1
+
+ self.block = nn.ModuleList(
+ [
+ LongT5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i)
+ for i in range(config.num_layers)
+ ]
+ )
+ self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings
+ def set_input_embeddings(self, new_embeddings):
+ self.embed_tokens = new_embeddings
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ inputs_embeds=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ cache_position=None,
+ ):
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
+ raise ValueError(
+ f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
+ )
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
+ raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ batch_size, seq_length = input_shape
+
+ # initialize past_key_values
+ return_legacy_cache = False
+ return_self_attention_cache = False
+ if self.is_decoder and (use_cache or past_key_values is not None):
+ if isinstance(past_key_values, Cache) and not isinstance(past_key_values, EncoderDecoderCache):
+ return_self_attention_cache = True
+ past_key_values = EncoderDecoderCache(past_key_values, DynamicCache())
+ elif not isinstance(past_key_values, EncoderDecoderCache):
+ return_legacy_cache = True
+ logger.warning_once(
+ "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.48.0. "
+ "You should pass an instance of `EncoderDecoderCache` instead, e.g. "
+ "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
+ )
+ past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
+ elif past_key_values is None:
+ past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())
+ elif not self.is_decoder:
+ # do not pass cache object down the line for encoder stack
+ # it messes indexing later in decoder-stack because cache object is modified in-place
+ past_key_values = None
+
+ past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
+ if cache_position is None:
+ cache_position = torch.arange(
+ past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
+ )
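+ # e.g. when decoding with a cache of 5 past tokens and seq_length=1, cache_position is [5]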
+
+ if attention_mask is None and not is_torchdynamo_compiling():
+ # required mask seq length can be calculated via length of past
+ mask_seq_length = past_key_values_length + seq_length
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
+
+ if self.is_decoder:
+ causal_mask = self._update_causal_mask(
+ attention_mask,
+ inputs_embeds,
+ cache_position,
+ past_key_values.self_attention_cache if past_key_values is not None else None,
+ output_attentions,
+ )
+ # We use local attention in encoder self-attention, otherwise standard self & cross attentions are used
+ elif self.config.encoder_attention_type == "local":
+ causal_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device)
+ else: # we need to use both local attention mask and standard extended mask for transient-global attention
+ causal_mask = attention_mask
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and self.is_decoder) else None
+ position_bias = None
+ encoder_decoder_position_bias = None
+
+ hidden_states = self.dropout(inputs_embeds)
+
+ for i, layer_module in enumerate(self.block):
+ layer_head_mask = head_mask[i]
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.forward,
+ hidden_states,
+ causal_mask,
+ position_bias,
+ encoder_hidden_states,
+ encoder_extended_attention_mask,
+ encoder_decoder_position_bias,
+ layer_head_mask,
+ cross_attn_layer_head_mask,
+ None, # past_key_value is always None with gradient checkpointing
+ use_cache,
+ output_attentions,
+ return_dict,
+ cache_position,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_bias=position_bias,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
+ layer_head_mask=layer_head_mask,
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ # layer_outputs is a tuple with:
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
+ if use_cache is False:
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
+
+ hidden_states, next_decoder_cache = layer_outputs[:2]
+
+ # We share the position biases between the layers - the first layer stores them
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ position_bias = layer_outputs[2]
+ if self.is_decoder and encoder_hidden_states is not None:
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[3],)
+ if self.is_decoder:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
+
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if return_self_attention_cache:
+ next_cache = past_key_values.self_attention_cache
+ if return_legacy_cache:
+ next_cache = past_key_values.to_legacy_cache()
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_cache,
+ all_hidden_states,
+ all_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
+ def _update_causal_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_tensor: torch.Tensor,
+ cache_position: torch.Tensor,
+ past_key_values: Cache,
+ output_attentions: bool,
+ ):
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and (attention_mask == 0.0).any():
+ return attention_mask
+ return None
+
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+ # to infer the attention mask.
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ using_static_cache = isinstance(past_key_values, StaticCache)
+
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
+ attention_mask,
+ inputs_embeds=input_tensor,
+ past_key_values_length=past_seen_tokens,
+ is_training=self.training,
+ ):
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ sequence_length = input_tensor.shape[1]
+ if using_static_cache:
+ target_length = past_key_values.get_max_cache_shape()
+ else:
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else past_seen_tokens + sequence_length + 1
+ )
+
+ # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask,
+ sequence_length=sequence_length,
+ target_length=target_length,
+ dtype=dtype,
+ device=device,
+ cache_position=cache_position,
+ batch_size=input_tensor.shape[0],
+ )
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ and not output_attentions
+ ):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ min_dtype = torch.finfo(dtype).min
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+ @staticmethod
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position
+ def _prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask: torch.Tensor,
+ sequence_length: int,
+ target_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ cache_position: torch.Tensor,
+ batch_size: int,
+ **kwargs,
+ ):
+ """
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+ Args:
+ attention_mask (`torch.Tensor`):
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
+ `(batch_size, 1, query_length, key_value_length)`.
+ sequence_length (`int`):
+ The sequence length being processed.
+ target_length (`int`):
+ The target length: when generating with static cache, the mask should be as long as the static cache,
+ to account for the 0 padding, the part of the cache that is not filled yet.
+ dtype (`torch.dtype`):
+ The dtype to use for the 4D attention mask.
+ device (`torch.device`):
+ The device to place the 4D attention mask on.
+ cache_position (`torch.Tensor`):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ batch_size (`torch.Tensor`):
+ Batch size.
+ """
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ causal_mask = attention_mask
+ else:
+ min_dtype = torch.finfo(dtype).min
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+ )
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
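+ # Illustrative example: with cache_position=[2, 3, 4] and target_length=5, row i of the mask keeps 0.0 for
+ # key positions <= cache_position[i] and min_dtype elsewhere, so each query only attends to its past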
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+
+ return causal_mask
+
+
+LONGT5_START_DOCSTRING = r"""
+
+ The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long
+ Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo
+ Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising
+ generative setting. The LongT5 model is an extension of the T5 model, and it enables using one of two
+ efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`LongT5Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LONGT5_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for detail.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ To learn more about how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5
+ Training](./longt5#training).
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
+ `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
+ cache in the correct position and to infer the complete sequence length.
+"""
+
+LONGT5_ENCODER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so
+ you should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            To know more on how to prepare `input_ids` for pretraining, take a look at [LONGT5
+ Training](./longt5#training).
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+__HEAD_MASK_WARNING_MSG = """
+The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
+`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
+If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
+num_heads)`.
+"""
+
+
+@add_start_docstrings(
+ "The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+class LongT5Model(LongT5PreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = [
+ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
+ ]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = LongT5Stack(decoder_config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongT5Model
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
+ >>> model = LongT5Model.from_pretrained("google/long-t5-local-base")
+
+ >>> # Let's try a very long encoder input.
+ >>> input_ids = tokenizer(
+ ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=past_key_values,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING)
+class LongT5ForConditionalGeneration(LongT5PreTrainedModel, GenerationMixin):
+ _keys_to_ignore_on_load_unexpected = [
+ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
+ ]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.model_dim = config.d_model
+
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = LongT5Stack(decoder_config, self.shared)
+
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
+            labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
+ >>> model = LongT5ForConditionalGeneration.from_pretrained(
+ ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps"
+ ... )
+
+ >>> # Let's try a very long input.
+ >>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt")
+ >>> input_ids = inputs.input_ids
+
+ >>> outputs = model.generate(input_ids)
+ >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ abstractthe aim of this article is to provide an overview of the literature on the role of dog
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ # Convert encoder inputs in embeddings if needed
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+ # get decoder inputs from shifting lm labels to the right
+ decoder_input_ids = self._shift_right(labels)
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=past_key_values,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.model_dim**-0.5)
+
+ lm_logits = self.lm_head(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
+
+ labels = labels.to(lm_logits.device)
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
+ # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
+
+ if not return_dict:
+ output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
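+        # Illustrative sketch (hypothetical values): `_shift_right` prepends `decoder_start_token_id`,
+        # drops the last label, and replaces any -100 entries with `pad_token_id`, e.g.
+        #     labels = [[42, 7, 1]]  ->  decoder_input_ids = [[0, 42, 7]]  (with decoder_start_token_id == 0)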
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ # if decoder past is not included in output
+ # speedy decoding is disabled and no need to reorder
+ if past_key_values is None:
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
+ return past_key_values
+
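+        # re-index every cached key/value tensor along its batch dimension so the cache follows
+        # the beams selected by `beam_idx` during beam search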
+ reordered_decoder_past = ()
+ for layer_past_states in past_key_values:
+ # get the correct batch idx from layer past batch dim
+            # the batch dimension of each cached `past` tensor is its first dimension
+ reordered_layer_past_states = ()
+ for layer_past_state in layer_past_states:
+ # need to set correct `past` for each of the four key / value states
+ reordered_layer_past_states = reordered_layer_past_states + (
+ layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
+ )
+
+ assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
+ assert len(reordered_layer_past_states) == len(layer_past_states)
+
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
+ return reordered_decoder_past
+
+
+@add_start_docstrings(
+ "The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
+ LONGT5_START_DOCSTRING,
+)
+class LongT5EncoderModel(LongT5PreTrainedModel):
+ _tied_weights_keys = ["encoder.embed_tokens.weight"]
+ _keys_to_ignore_on_load_unexpected = [r"decoder"]
+
+ def __init__(self, config: LongT5Config):
+ super().__init__(config)
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = LongT5Stack(encoder_config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+        >>> from transformers import AutoTokenizer, LongT5EncoderModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
+ >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base")
+ >>> input_ids = tokenizer(
+ ... 100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return encoder_outputs
+
+
+__all__ = ["LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", "LongT5PreTrainedModel"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..444a8f8cc8e0203a4cd0151c6bff0644e4e3d6b6
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_mt5 import *
+ from .modeling_flax_mt5 import *
+ from .modeling_mt5 import *
+ from .modeling_tf_mt5 import *
+ from .tokenization_mt5 import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e41c6467e55c44ce09aec90c6db9d12a1b21afd
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/configuration_mt5.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/configuration_mt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1fa3a5781ef82d39aa5185fb1fa41861834209cf
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/configuration_mt5.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_flax_mt5.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_flax_mt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..290344906306832d2023f22fe17c2b82f55e4467
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_flax_mt5.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_mt5.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_mt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e86141285a2941caa6451bbff8a51451d08f5718
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_mt5.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_tf_mt5.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_tf_mt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c6ec5ee5885a667c0e514943b7358f3aefdf242
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_tf_mt5.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a2fdd21c6f9f5cf863a174cd67e82afd760bcb6
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5_fast.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c8579b6540c7b7c14c92b3239294fcc034b897e
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5_fast.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/configuration_mt5.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/configuration_mt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b903b908e42f0682b266f0a192a9f4528d8e1a0
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/configuration_mt5.py
@@ -0,0 +1,182 @@
+# coding=utf-8
+# Copyright 2020, The T5 Authors and HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""mT5 model configuration"""
+
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxSeq2SeqConfigWithPast
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MT5Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to
+ instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the mT5
+ [google/mt5-small](https://huggingface.co/google/mt5-small) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Arguments:
+ vocab_size (`int`, *optional*, defaults to 250112):
+ Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`].
+ d_model (`int`, *optional*, defaults to 512):
+ Size of the encoder layers and the pooler layer.
+ d_kv (`int`, *optional*, defaults to 64):
+            Size of the key, query, value projections per attention head. Conventionally, `d_kv` is expected to equal
+            `d_model // num_heads`, but in the mt5-small architecture it does not. The `inner_dim` of the projection layer is defined as `num_heads * d_kv`.
+ d_ff (`int`, *optional*, defaults to 1024):
+ Size of the intermediate feed forward layer in each `T5Block`.
+ num_layers (`int`, *optional*, defaults to 8):
+ Number of hidden layers in the Transformer encoder.
+ num_decoder_layers (`int`, *optional*):
+ Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
+ num_heads (`int`, *optional*, defaults to 6):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
+ The number of buckets to use for each attention layer.
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
+ The maximum distance of the longer sequences for the bucket separation.
+ dropout_rate (`float`, *optional*, defaults to 0.1):
+ The ratio for all dropout layers.
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for classifier.
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
+ The epsilon used by the layer normalization layers.
+ initializer_factor (`float`, *optional*, defaults to 1):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+ feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
+ Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ """
+
+ model_type = "mt5"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "num_heads",
+ "num_hidden_layers": "num_layers",
+ "head_dim": "d_kv",
+ }
+
+ def __init__(
+ self,
+ vocab_size=250112,
+ d_model=512,
+ d_kv=64,
+ d_ff=1024,
+ num_layers=8,
+ num_decoder_layers=None,
+ num_heads=6,
+ relative_attention_num_buckets=32,
+ relative_attention_max_distance=128,
+ dropout_rate=0.1,
+ layer_norm_epsilon=1e-6,
+ initializer_factor=1.0,
+ feed_forward_proj="gated-gelu",
+ is_encoder_decoder=True,
+ use_cache=True,
+ tokenizer_class="T5Tokenizer",
+ tie_word_embeddings=False,
+ pad_token_id=0,
+ eos_token_id=1,
+ decoder_start_token_id=0,
+ classifier_dropout=0.0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.d_model = d_model
+ self.d_kv = d_kv
+ self.d_ff = d_ff
+ self.num_layers = num_layers
+ self.num_decoder_layers = (
+ num_decoder_layers if num_decoder_layers is not None else self.num_layers
+ ) # default = symmetry
+ self.num_heads = num_heads
+ self.relative_attention_num_buckets = relative_attention_num_buckets
+ self.relative_attention_max_distance = relative_attention_max_distance
+ self.dropout_rate = dropout_rate
+ self.classifier_dropout = classifier_dropout
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_factor = initializer_factor
+ self.feed_forward_proj = feed_forward_proj
+ self.use_cache = use_cache
+
+ act_info = self.feed_forward_proj.split("-")
+ self.dense_act_fn = act_info[-1]
+ self.is_gated_act = act_info[0] == "gated"
+
+ if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
+ raise ValueError(
+ f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
+ "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
+ "'gated-gelu' or 'relu'"
+ )
+
+ # for backwards compatibility
+ if feed_forward_proj == "gated-gelu":
+ self.dense_act_fn = "gelu_new"
+
+ super().__init__(
+ is_encoder_decoder=is_encoder_decoder,
+ tokenizer_class=tokenizer_class,
+ tie_word_embeddings=tie_word_embeddings,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
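+
+        # A minimal usage sketch (not part of the original docs): with the default
+        # `feed_forward_proj="gated-gelu"`, `is_gated_act` is True and, for backwards compatibility,
+        # `dense_act_fn` is set to "gelu_new":
+        #     cfg = MT5Config()
+        #     assert cfg.is_gated_act and cfg.dense_act_fn == "gelu_new"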
+
+
+class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
+ @property
+ # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ common_inputs = {
+ "input_ids": {0: "batch", 1: "encoder_sequence"},
+ "attention_mask": {0: "batch", 1: "encoder_sequence"},
+ }
+ if self.use_past:
+ common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
+ common_inputs["decoder_input_ids"] = {0: "batch"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
+ else:
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
+
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+
+ return common_inputs
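+        # For example, with `use_past=False` this property resolves to (sketch):
+        #     {"input_ids": {0: "batch", 1: "encoder_sequence"},
+        #      "attention_mask": {0: "batch", 1: "encoder_sequence"},
+        #      "decoder_input_ids": {0: "batch", 1: "decoder_sequence"},
+        #      "decoder_attention_mask": {0: "batch", 1: "decoder_sequence"}}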
+
+ @property
+ # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
+ def default_onnx_opset(self) -> int:
+ return 13
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 5e-4
+
+
+__all__ = ["MT5Config", "MT5OnnxConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_flax_mt5.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_flax_mt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..13bd83b75034ba6de1260a0bd754f289979e4fa7
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_flax_mt5.py
@@ -0,0 +1,123 @@
+# coding=utf-8
+# Copyright 2021 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Flax mT5 model."""
+
+import jax.numpy as jnp
+
+from ...utils import logging
+from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
+from .configuration_mt5 import MT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "T5Config"
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
+def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = jnp.zeros_like(input_ids)
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
+
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
+ return shifted_input_ids
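+
+
+# Illustrative sketch (hypothetical values), not part of the original module:
+#     shift_tokens_right(jnp.array([[42, 7, 1]]), pad_token_id=0, decoder_start_token_id=0)
+# returns [[0, 42, 7]]; any -100 entries in the shifted ids are replaced by `pad_token_id`.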
+
+
+class FlaxMT5Model(FlaxT5Model):
+ r"""
+ This class overrides [`FlaxT5Model`]. Please check the superclass for the appropriate documentation alongside usage
+ examples.
+
+ Examples:
+
+ ```python
+ >>> from transformers import FlaxMT5Model, AutoTokenizer
+
+ >>> model = FlaxMT5Model.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> summary = "Weiter Verhandlung in Syrien."
+ >>> inputs = tokenizer(article, return_tensors="np")
+
+ >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids
+
+ >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=decoder_input_ids)
+ >>> hidden_states = outputs.last_hidden_state
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+
+
+class FlaxMT5EncoderModel(FlaxT5EncoderModel):
+ r"""
+ This class overrides [`FlaxT5EncoderModel`]. Please check the superclass for the appropriate documentation
+ alongside usage examples.
+
+ Examples:
+
+ ```python
+    >>> from transformers import FlaxMT5EncoderModel, AutoTokenizer
+
+    >>> model = FlaxMT5EncoderModel.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> summary = "Weiter Verhandlung in Syrien."
+ >>> inputs = tokenizer(article, return_tensors="np")
+
+ >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids
+
+ >>> outputs = model(input_ids=inputs["input_ids"])
+ >>> hidden_states = outputs.last_hidden_state
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+
+
+class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
+ r"""
+ This class overrides [`FlaxT5ForConditionalGeneration`]. Please check the superclass for the appropriate
+ documentation alongside usage examples.
+
+ Examples:
+
+ ```python
+ >>> from transformers import FlaxMT5ForConditionalGeneration, AutoTokenizer
+
+ >>> model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> summary = "Weiter Verhandlung in Syrien."
+ >>> inputs = tokenizer(article, return_tensors="np")
+
+ >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids
+
+ >>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids)
+ >>> logits = outputs.logits
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+
+
+__all__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_mt5.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_mt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..5667d2635b7c15b050a0c760014d21d7806255e5
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_mt5.py
@@ -0,0 +1,2557 @@
+# coding=utf-8
+# Copyright 2020 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch mT5 model."""
+
+import copy
+import math
+import os
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache, StaticCache
+from ...generation import GenerationMixin
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+ Seq2SeqQuestionAnsweringModelOutput,
+ Seq2SeqSequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ DUMMY_INPUTS,
+ DUMMY_MASK,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_torch_fx_proxy,
+ is_torchdynamo_compiling,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.model_parallel_utils import assert_device_map, get_device_map
+from .configuration_mt5 import MT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "MT5Config"
+_CHECKPOINT_FOR_DOC = "mt5-small"
+
+
+####################################################
+# This dict contains ids and associated url
+# for the pretrained weights provided with the models
+####################################################
+
+PARALLELIZE_DOCSTRING = r"""
+    This is an experimental feature and is subject to change at a moment's notice.
+
+ Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
+ it will evenly distribute blocks across all devices.
+
+ Args:
+ device_map (`Dict[int, list]`, *optional*):
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
+ have fewer attention modules mapped to it than other devices. For reference, the mt5 models have the
+ following number of attention modules:
+
+ - mt5-small: 6
+ - mt5-base: 12
+ - mt5-large: 24
+ - mt5-xl: 24
+ - mt5-xxl: 24
+
+ Example:
+
+ ```python
+ # Here is an example of a device map on a machine with 4 GPUs using mt5-xl, which has a total of 24 attention modules:
+ model = MT5ForConditionalGeneration.from_pretrained("mt5-xl")
+ device_map = {
+ 0: [0, 1, 2],
+ 1: [3, 4, 5, 6, 7, 8, 9],
+ 2: [10, 11, 12, 13, 14, 15, 16],
+ 3: [17, 18, 19, 20, 21, 22, 23],
+ }
+ model.parallelize(device_map)
+ ```
+"""
+DEPARALLELIZE_DOCSTRING = r"""
+ Moves the model to cpu from a model parallel state.
+
+ Example:
+
+ ```python
+ # On a 4 GPU machine with mt5-xl:
+ model = MT5ForConditionalGeneration.from_pretrained("Mt5-xl")
+ device_map = {
+ 0: [0, 1, 2],
+ 1: [3, 4, 5, 6, 7, 8, 9],
+ 2: [10, 11, 12, 13, 14, 15, 16],
+ 3: [17, 18, 19, 20, 21, 22, 23],
+ }
+ model.parallelize(device_map) # Splits the model across several devices
+ model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
+ ```
+"""
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->MT5
+class MT5LayerNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ Construct a layernorm module in the MT5 style. No bias and no subtraction of mean.
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ # MT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
+        # Square Layer Normalization (https://arxiv.org/abs/1910.07467); thus the variance is calculated
+        # without the mean and there is no bias. Additionally, we want to make sure that the accumulation for
+        # half-precision inputs is done in fp32.
+
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+ # convert into half-precision if necessary
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
+ hidden_states = hidden_states.to(self.weight.dtype)
+
+ return self.weight * hidden_states
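+        # In short, this computes y = weight * x / sqrt(mean(x**2, dim=-1) + eps), i.e. RMSNorm with the
+        # mean-of-squares accumulated in float32 before casting back to the weight dtype when needed.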
+
+
+# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->MT5
+class MT5DenseActDense(nn.Module):
+ def __init__(self, config: MT5Config):
+ super().__init__()
+ self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN[config.dense_act_fn]
+
+ def forward(self, hidden_states):
+ hidden_states = self.wi(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ if (
+ isinstance(self.wo.weight, torch.Tensor)
+ and hidden_states.dtype != self.wo.weight.dtype
+ and self.wo.weight.dtype != torch.int8
+ ):
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->MT5
+class MT5DenseGatedActDense(nn.Module):
+ def __init__(self, config: MT5Config):
+ super().__init__()
+ self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
+ self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN[config.dense_act_fn]
+
+ def forward(self, hidden_states):
+ hidden_gelu = self.act(self.wi_0(hidden_states))
+ hidden_linear = self.wi_1(hidden_states)
+ hidden_states = hidden_gelu * hidden_linear
+ hidden_states = self.dropout(hidden_states)
+
+ # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
+ # See https://github.com/huggingface/transformers/issues/20287
+        # we also make sure the weights are not in `int8` in case users force `_keep_in_fp32_modules` to be `None`
+ if (
+ isinstance(self.wo.weight, torch.Tensor)
+ and hidden_states.dtype != self.wo.weight.dtype
+ and self.wo.weight.dtype != torch.int8
+ ):
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
+
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
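+        # i.e. the gated feed-forward computes wo(act(wi_0(x)) * wi_1(x)), the GEGLU-style variant used
+        # with the default gated-gelu activation.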
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->MT5
+class MT5LayerFF(nn.Module):
+ def __init__(self, config: MT5Config):
+ super().__init__()
+ if config.is_gated_act:
+ self.DenseReluDense = MT5DenseGatedActDense(config)
+ else:
+ self.DenseReluDense = MT5DenseActDense(config)
+
+ self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(self, hidden_states):
+ forwarded_states = self.layer_norm(hidden_states)
+ forwarded_states = self.DenseReluDense(forwarded_states)
+ hidden_states = hidden_states + self.dropout(forwarded_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->MT5
+class MT5Attention(nn.Module):
+ def __init__(
+ self,
+ config: MT5Config,
+ has_relative_attention_bias=False,
+ layer_idx: Optional[int] = None,
+ ):
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.has_relative_attention_bias = has_relative_attention_bias
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.relative_attention_max_distance = config.relative_attention_max_distance
+ self.d_model = config.d_model
+ self.key_value_proj_dim = config.d_kv
+ self.n_heads = config.num_heads
+ self.dropout = config.dropout_rate
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
+ self.layer_idx = layer_idx
+ if layer_idx is None and self.is_decoder:
+ logger.warning_once(
+ f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
+ "will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ # Mesh TensorFlow initialization to avoid scaling before softmax
+ self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
+ self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
+
+ if self.has_relative_attention_bias:
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
+ self.pruned_heads = set()
+ self.gradient_checkpointing = False
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q = prune_linear_layer(self.q, index)
+ self.k = prune_linear_layer(self.k, index)
+ self.v = prune_linear_layer(self.v, index)
+ self.o = prune_linear_layer(self.o, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.inner_dim = self.key_value_proj_dim * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ @staticmethod
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
+ """
+ Adapted from Mesh Tensorflow:
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
+
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
+
+ Args:
+ relative_position: an int32 Tensor
+ bidirectional: a boolean - whether the attention is bidirectional
+ num_buckets: an integer
+ max_distance: an integer
+
+ Returns:
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
+ """
+ relative_buckets = 0
+ if bidirectional:
+ num_buckets //= 2
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
+ relative_position = torch.abs(relative_position)
+ else:
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
+ # now relative_position is in the range [0, inf)
+
+ # half of the buckets are for exact increments in positions
+ max_exact = num_buckets // 2
+ is_small = relative_position < max_exact
+
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
+ relative_position_if_large = max_exact + (
+ torch.log(relative_position.float() / max_exact)
+ / math.log(max_distance / max_exact)
+ * (num_buckets - max_exact)
+ ).to(torch.long)
+ relative_position_if_large = torch.min(
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
+ )
+
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
+ return relative_buckets
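+        # Worked example (bidirectional, num_buckets=32, max_distance=128): after halving, 16 buckets are
+        # reserved for positive offsets; a relative position of +1 lands in bucket 16 + 1 = 17, while -1
+        # lands in bucket 1. Offsets beyond +/- max_distance all share the last bucket of their half.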
+
+ def compute_bias(self, query_length, key_length, device=None, cache_position=None):
+ """Compute binned relative position bias"""
+ if device is None:
+ device = self.relative_attention_bias.weight.device
+ if cache_position is None:
+ context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
+ else:
+ context_position = cache_position[:, None].to(device)
+ memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
+ relative_position = memory_position - context_position # shape (query_length, key_length)
+ relative_position_bucket = self._relative_position_bucket(
+ relative_position, # shape (query_length, key_length)
+ bidirectional=(not self.is_decoder),
+ num_buckets=self.relative_attention_num_buckets,
+ max_distance=self.relative_attention_max_distance,
+ )
+ values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
+ values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
+ return values
+
+ def forward(
+ self,
+ hidden_states,
+ mask=None,
+ key_value_states=None,
+ position_bias=None,
+ past_key_value=None,
+ layer_head_mask=None,
+ query_length=None,
+ use_cache=False,
+ output_attentions=False,
+ cache_position=None,
+ ):
+ """
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
+ """
+ # Input is (batch_size, seq_length, dim)
+ # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder)
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # if key_value_states are provided this layer is used as a cross-attention layer for the decoder
+ is_cross_attention = key_value_states is not None
+
+ query_states = self.q(hidden_states)
+ query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+
+ if past_key_value is not None:
+ is_updated = past_key_value.is_updated.get(self.layer_idx)
+ if is_cross_attention:
+ # after the first generated id, we can subsequently re-use all key/value_states from cache
+ curr_past_key_value = past_key_value.cross_attention_cache
+ else:
+ curr_past_key_value = past_key_value.self_attention_cache
+
+ current_states = key_value_states if is_cross_attention else hidden_states
+ if is_cross_attention and past_key_value is not None and is_updated:
+ # reuse k,v, cross_attentions
+ key_states = curr_past_key_value.key_cache[self.layer_idx]
+ value_states = curr_past_key_value.value_cache[self.layer_idx]
+ else:
+ key_states = self.k(current_states)
+ value_states = self.v(current_states)
+ key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+ value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+
+ if past_key_value is not None:
+ # save all key/value_states to cache to be re-used for fast auto-regressive generation
+ cache_position = cache_position if not is_cross_attention else None
+ key_states, value_states = curr_past_key_value.update(
+ key_states, value_states, self.layer_idx, {"cache_position": cache_position}
+ )
+ # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
+ if is_cross_attention:
+ past_key_value.is_updated[self.layer_idx] = True
+
+ # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
+ scores = torch.matmul(query_states, key_states.transpose(3, 2))
+
+ if position_bias is None:
+ key_length = key_states.shape[-2]
+ # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
+ real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
+ if not self.has_relative_attention_bias:
+ position_bias = torch.zeros(
+ (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
+ )
+ if self.gradient_checkpointing and self.training:
+ position_bias.requires_grad = True
+ else:
+ position_bias = self.compute_bias(
+ real_seq_length, key_length, device=scores.device, cache_position=cache_position
+ )
+ position_bias = position_bias[:, :, -seq_length:, :]
+
+ if mask is not None:
+ causal_mask = mask[:, :, :, : key_states.shape[-2]]
+ position_bias = position_bias + causal_mask
+
+ if self.pruned_heads:
+ mask = torch.ones(position_bias.shape[1])
+ mask[list(self.pruned_heads)] = 0
+ position_bias_masked = position_bias[:, mask.bool()]
+ else:
+ position_bias_masked = position_bias
+
+ scores += position_bias_masked
+
+ # (batch_size, n_heads, seq_length, key_length)
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = attn_weights * layer_head_mask
+
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(batch_size, -1, self.inner_dim)
+ attn_output = self.o(attn_output)
+
+ outputs = (attn_output, past_key_value, position_bias)
+
+ if output_attentions:
+ outputs = outputs + (attn_weights,)
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->MT5
+class MT5LayerSelfAttention(nn.Module):
+ def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.SelfAttention = MT5Attention(
+ config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx
+ )
+ self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ output_attentions=False,
+ cache_position=None,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.SelfAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ cache_position=cache_position,
+ )
+ hidden_states = hidden_states + self.dropout(attention_output[0])
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->MT5
+class MT5LayerCrossAttention(nn.Module):
+ def __init__(self, config, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.EncDecAttention = MT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
+ self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states,
+ key_value_states,
+ attention_mask=None,
+ position_bias=None,
+ layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ query_length=None,
+ output_attentions=False,
+ cache_position=None,
+ ):
+ normed_hidden_states = self.layer_norm(hidden_states)
+ attention_output = self.EncDecAttention(
+ normed_hidden_states,
+ mask=attention_mask,
+ key_value_states=key_value_states,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ query_length=query_length,
+ output_attentions=output_attentions,
+ cache_position=cache_position,
+ )
+ layer_output = hidden_states + self.dropout(attention_output[0])
+ outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.t5.modeling_t5.T5Block with T5->MT5
+class MT5Block(nn.Module):
+ def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.is_decoder = config.is_decoder
+ self.layer = nn.ModuleList()
+ self.layer.append(
+ MT5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)
+ )
+ if self.is_decoder:
+ self.layer.append(MT5LayerCrossAttention(config, layer_idx=layer_idx))
+
+ self.layer.append(MT5LayerFF(config))
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_bias=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ encoder_decoder_position_bias=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ past_key_value=None,
+ use_cache=False,
+ output_attentions=False,
+ return_dict=True,
+ cache_position=None,
+ ):
+ self_attention_outputs = self.layer[0](
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ cache_position=cache_position,
+ )
+ hidden_states, past_key_value = self_attention_outputs[:2]
+ attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
+
+ # clamp inf values to enable fp16 training
+ if hidden_states.dtype == torch.float16:
+ clamp_value = torch.where(
+ torch.isinf(hidden_states).any(),
+ torch.finfo(hidden_states.dtype).max - 1000,
+ torch.finfo(hidden_states.dtype).max,
+ )
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ do_cross_attention = self.is_decoder and encoder_hidden_states is not None
+ if do_cross_attention:
+ cross_attention_outputs = self.layer[1](
+ hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ position_bias=encoder_decoder_position_bias,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_value,
+ query_length=cache_position[-1] + 1,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states, past_key_value = cross_attention_outputs[:2]
+
+ # clamp inf values to enable fp16 training
+ if hidden_states.dtype == torch.float16:
+ clamp_value = torch.where(
+ torch.isinf(hidden_states).any(),
+ torch.finfo(hidden_states.dtype).max - 1000,
+ torch.finfo(hidden_states.dtype).max,
+ )
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ # Keep cross-attention outputs and relative position weights
+ attention_outputs = attention_outputs + cross_attention_outputs[2:]
+
+ # Apply Feed Forward layer
+ hidden_states = self.layer[-1](hidden_states)
+
+ # clamp inf values to enable fp16 training
+ if hidden_states.dtype == torch.float16:
+ clamp_value = torch.where(
+ torch.isinf(hidden_states).any(),
+ torch.finfo(hidden_states.dtype).max - 1000,
+ torch.finfo(hidden_states.dtype).max,
+ )
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if use_cache:
+ outputs = outputs + (past_key_value,) + attention_outputs
+ else:
+ outputs = outputs + attention_outputs
+
+ return outputs # hidden-states, past_key_value, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
+
+
+def load_tf_weights_in_mt5(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ tf_weights = {}
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ tf_weights[name] = array
+
+ for txt_name in names:
+ name = txt_name.split("/")
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
+ # which are not required for using pretrained model
+ if any(
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ tf_weights.pop(txt_name, None)
+ continue
+ if "_slot_" in name[-1]:
+ logger.info(f"Skipping {'/'.join(name)}")
+ tf_weights.pop(txt_name, None)
+ continue
+ pointer = model
+ array = tf_weights[txt_name]
+
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] in ["kernel", "scale", "embedding"]:
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "self_attention":
+ pointer = getattr(pointer, "layer")
+ pointer = pointer[0]
+ elif scope_names[0] == "enc_dec_attention":
+ pointer = getattr(pointer, "layer")
+ pointer = pointer[1]
+ elif scope_names[0] == "dense_relu_dense":
+ pointer = getattr(pointer, "layer")
+ pointer = pointer[2]
+ elif scope_names[0] == "rms_norm":
+ if hasattr(pointer, "layer_norm"):
+ pointer = getattr(pointer, "layer_norm")
+ elif hasattr(pointer, "final_layer_norm"):
+ pointer = getattr(pointer, "final_layer_norm")
+ elif scope_names[0] == "scale":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "squad":
+ pointer = getattr(pointer, "classifier")
+ elif scope_names[0] == "decoder" and name[1] == "logits":
+ continue
+ elif scope_names[0] == "logits":
+ pointer = getattr(pointer, "lm_head")
+ elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit():
+ pointer = getattr(pointer, f"wi_{scope_names[1]}")
+ continue
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ if scope_names[0] not in ["kernel", "scale", "embedding"]:
+ pointer = getattr(pointer, "weight")
+ if scope_names[0] != "embedding":
+ logger.info(f"Transposing numpy weight of shape {array.shape} for {name}")
+ array = np.transpose(array)
+ try:
+ assert (
+ pointer.shape == array.shape
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array.astype(np.float32))
+ tf_weights.pop(txt_name, None)
+
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
+ return model
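+ # Editorial note (illustrative only, not part of the upstream code): a typical call,
+ # assuming a hypothetical local TensorFlow checkpoint path:
+ #
+ #   config = MT5Config.from_pretrained("google/mt5-small")
+ #   model = MT5ForConditionalGeneration(config)
+ #   model = load_tf_weights_in_mt5(model, config, "/path/to/tf_checkpoint")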
+
+
+# Copied from transformers.models.t5.modeling_t5.T5ClassificationHead with T5->MT5
+class MT5ClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config: MT5Config):
+ super().__init__()
+ self.dense = nn.Linear(config.d_model, config.d_model)
+ self.dropout = nn.Dropout(p=config.classifier_dropout)
+ self.out_proj = nn.Linear(config.d_model, config.num_labels)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.out_proj(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel with T5->MT5, t5->mt5
+class MT5PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MT5Config
+ load_tf_weights = load_tf_weights_in_mt5
+ base_model_prefix = "transformer"
+ is_parallelizable = True
+ supports_gradient_checkpointing = True
+ _supports_quantized_cache = False # enc-dec models don't support it yet
+ _supports_static_cache = True
+ _supports_cache_class = True
+ _no_split_modules = ["MT5Block"]
+ _keep_in_fp32_modules = ["wo"]
+
+ @property
+ def dummy_inputs(self):
+ input_ids = torch.tensor(DUMMY_INPUTS)
+ input_mask = torch.tensor(DUMMY_MASK)
+ dummy_inputs = {
+ "decoder_input_ids": input_ids,
+ "input_ids": input_ids,
+ "decoder_attention_mask": input_mask,
+ }
+ return dummy_inputs
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor # Used for testing weights initialization
+ if isinstance(module, MT5LayerNorm):
+ module.weight.data.fill_(factor * 1.0)
+ elif isinstance(
+ module,
+ (MT5Model, MT5ForConditionalGeneration, MT5EncoderModel, MT5ForQuestionAnswering),
+ ):
+ # Mesh TensorFlow embeddings initialization
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
+ module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ if hasattr(module, "qa_outputs"):
+ module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ module.qa_outputs.bias.data.zero_()
+ elif isinstance(module, MT5ForTokenClassification):
+ if hasattr(module, "classifier"):
+ module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ module.classifier.bias.data.zero_()
+ elif isinstance(module, MT5ClassificationHead):
+ module.dense.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.dense, "bias") and module.dense.bias is not None:
+ module.dense.bias.data.zero_()
+ module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None:
+ module.out_proj.bias.data.zero_()
+ elif isinstance(module, MT5DenseActDense):
+ # Mesh TensorFlow FF initialization
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
+ module.wi.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, MT5DenseGatedActDense):
+ module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
+ module.wi_0.bias.data.zero_()
+ module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
+ module.wi_1.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, MT5Attention):
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
+ d_model = self.config.d_model
+ key_value_proj_dim = self.config.d_kv
+ n_heads = self.config.num_heads
+ module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
+ module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
+ if module.has_relative_attention_bias:
+ module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
+
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ if decoder_start_token_id is None:
+ raise ValueError(
+ "self.model.config.decoder_start_token_id has to be defined. In MT5 it is usually set to the pad_token_id. "
+ "See MT5 docs for more information."
+ )
+
+ # shift inputs to the right
+ if is_torch_fx_proxy(input_ids):
+ # Item assignment is not supported natively for proxies.
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
+ else:
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
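+ # Editorial note (illustrative only): with decoder_start_token_id = 0 and pad_token_id = 0,
+ # labels [[8774, 10, -100, -100]] become decoder inputs [[0, 8774, 10, 0]]: the sequence is
+ # shifted one position to the right, the final token is dropped, and any remaining -100
+ # placeholders are replaced by the pad token.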
+
+
+# Copied from transformers.models.t5.modeling_t5.T5Stack with T5->MT5
+class MT5Stack(MT5PreTrainedModel):
+ def __init__(self, config, embed_tokens=None):
+ super().__init__(config)
+
+ self.embed_tokens = embed_tokens
+ self.is_decoder = config.is_decoder
+
+ self.block = nn.ModuleList(
+ [MT5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)]
+ )
+ self.final_layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+ self.dropout = nn.Dropout(config.dropout_rate)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+ self.gradient_checkpointing = False
+
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
+ def parallelize(self, device_map=None):
+ warnings.warn(
+ "`MT5Stack.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model"
+ " with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0,"
+ " 'block.1': 1, ...}",
+ FutureWarning,
+ )
+ # Check validity of device_map
+ self.device_map = (
+ get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map
+ )
+ assert_device_map(self.device_map, len(self.block))
+ self.model_parallel = True
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
+ # Load onto devices
+ for k, v in self.device_map.items():
+ for layer in v:
+ cuda_device = "cuda:" + str(k)
+ self.block[layer] = self.block[layer].to(cuda_device)
+
+ # Set embed_tokens to first layer
+ self.embed_tokens = self.embed_tokens.to(self.first_device)
+ # Set final layer norm to last device
+ self.final_layer_norm = self.final_layer_norm.to(self.last_device)
+
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
+ def deparallelize(self):
+ warnings.warn(
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
+ FutureWarning,
+ )
+ self.model_parallel = False
+ self.device_map = None
+ self.first_device = "cpu"
+ self.last_device = "cpu"
+ for i in range(len(self.block)):
+ self.block[i] = self.block[i].to("cpu")
+ self.embed_tokens = self.embed_tokens.to("cpu")
+ self.final_layer_norm = self.final_layer_norm.to("cpu")
+ torch.cuda.empty_cache()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, new_embeddings):
+ self.embed_tokens = new_embeddings
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ inputs_embeds=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ cache_position=None,
+ ):
+ # Model parallel
+ if self.model_parallel:
+ torch.cuda.set_device(self.first_device)
+ self.embed_tokens = self.embed_tokens.to(self.first_device)
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
+ raise ValueError(
+ f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
+ )
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
+ raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ if self.embed_tokens is None:
+ raise ValueError("You have to initialize the model with valid token embeddings")
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ batch_size, seq_length = input_shape
+
+ if use_cache is True:
+ if not self.is_decoder:
+ raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder")
+
+ # initialize past_key_values
+ return_legacy_cache = False
+ return_self_attention_cache = False
+ if self.is_decoder and (use_cache or past_key_values is not None):
+ if isinstance(past_key_values, Cache) and not isinstance(past_key_values, EncoderDecoderCache):
+ return_self_attention_cache = True
+ past_key_values = EncoderDecoderCache(past_key_values, DynamicCache())
+ elif not isinstance(past_key_values, EncoderDecoderCache):
+ return_legacy_cache = True
+ logger.warning_once(
+ "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.48.0. "
+ "You should pass an instance of `EncoderDecoderCache` instead, e.g. "
+ "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
+ )
+ past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
+ elif past_key_values is None:
+ past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())
+ elif not self.is_decoder:
+ # do not pass cache object down the line for encoder stack
+ # it messes indexing later in decoder-stack because cache object is modified in-place
+ past_key_values = None
+
+ past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
+ if cache_position is None:
+ cache_position = torch.arange(
+ past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
+ )
+
+ if attention_mask is None and not is_torchdynamo_compiling():
+ # required mask seq length can be calculated via length of past cache
+ mask_seq_length = past_key_values_length + seq_length
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
+
+ if self.config.is_decoder:
+ causal_mask = self._update_causal_mask(
+ attention_mask,
+ inputs_embeds,
+ cache_position,
+ past_key_values.self_attention_cache if past_key_values is not None else None,
+ output_attentions,
+ )
+ elif attention_mask is not None:
+ causal_mask = attention_mask[:, None, None, :]
+ causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
+ causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
+ else:
+ causal_mask = None
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(
+ encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long
+ )
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and self.is_decoder) else None
+ position_bias = None
+ encoder_decoder_position_bias = None
+
+ hidden_states = self.dropout(inputs_embeds)
+
+ for i, layer_module in enumerate(self.block):
+ layer_head_mask = head_mask[i]
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
+ # Model parallel
+ if self.model_parallel:
+ torch.cuda.set_device(hidden_states.device)
+ # Ensure that attention_mask is always on the same device as hidden_states
+ if causal_mask is not None:
+ causal_mask = causal_mask.to(hidden_states.device)
+ if position_bias is not None:
+ position_bias = position_bias.to(hidden_states.device)
+ if encoder_hidden_states is not None:
+ encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
+ if encoder_extended_attention_mask is not None:
+ encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
+ if encoder_decoder_position_bias is not None:
+ encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
+ if layer_head_mask is not None:
+ layer_head_mask = layer_head_mask.to(hidden_states.device)
+ if cross_attn_layer_head_mask is not None:
+ cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.forward,
+ hidden_states,
+ causal_mask,
+ position_bias,
+ encoder_hidden_states,
+ encoder_extended_attention_mask,
+ encoder_decoder_position_bias,
+ layer_head_mask,
+ cross_attn_layer_head_mask,
+ None, # past_key_value is always None with gradient checkpointing
+ use_cache,
+ output_attentions,
+ return_dict,
+ cache_position,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_bias=position_bias,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
+ layer_head_mask=layer_head_mask,
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ # layer_outputs is a tuple with:
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
+ if use_cache is False:
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
+
+ hidden_states, next_decoder_cache = layer_outputs[:2]
+
+ # We share the position biases between the layers - the first layer stores them
+ # layer_outputs = hidden-states, key-value-states, (self-attention position bias), (self-attention weights),
+ # (cross-attention position bias), (cross-attention weights)
+ position_bias = layer_outputs[2]
+ if self.is_decoder and encoder_hidden_states is not None:
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[3],)
+ if self.is_decoder:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
+
+ # Model Parallel: If it's the last layer for that device, put things on the next device
+ if self.model_parallel:
+ for k, v in self.device_map.items():
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
+
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if return_self_attention_cache:
+ next_cache = past_key_values.self_attention_cache
+ if return_legacy_cache:
+ next_cache = past_key_values.to_legacy_cache()
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_cache,
+ all_hidden_states,
+ all_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
+ def _update_causal_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_tensor: torch.Tensor,
+ cache_position: torch.Tensor,
+ past_key_values: Cache,
+ output_attentions: bool,
+ ):
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and (attention_mask == 0.0).any():
+ return attention_mask
+ return None
+
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+ # to infer the attention mask.
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ using_static_cache = isinstance(past_key_values, StaticCache)
+
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
+ attention_mask,
+ inputs_embeds=input_tensor,
+ past_key_values_length=past_seen_tokens,
+ is_training=self.training,
+ ):
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ sequence_length = input_tensor.shape[1]
+ if using_static_cache:
+ target_length = past_key_values.get_max_cache_shape()
+ else:
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else past_seen_tokens + sequence_length + 1
+ )
+
+ # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask,
+ sequence_length=sequence_length,
+ target_length=target_length,
+ dtype=dtype,
+ device=device,
+ cache_position=cache_position,
+ batch_size=input_tensor.shape[0],
+ )
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ and not output_attentions
+ ):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ min_dtype = torch.finfo(dtype).min
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+ @staticmethod
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position
+ def _prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask: torch.Tensor,
+ sequence_length: int,
+ target_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ cache_position: torch.Tensor,
+ batch_size: int,
+ **kwargs,
+ ):
+ """
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+ Args:
+ attention_mask (`torch.Tensor`):
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
+ `(batch_size, 1, query_length, key_value_length)`.
+ sequence_length (`int`):
+ The sequence length being processed.
+ target_length (`int`):
+ The target length: when generating with a static cache, the mask should be as long as the static cache
+ to account for the 0 padding, i.e. the part of the cache that is not filled yet.
+ dtype (`torch.dtype`):
+ The dtype to use for the 4D attention mask.
+ device (`torch.device`):
+ The device to place the 4D attention mask on.
+ cache_position (`torch.Tensor`):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ batch_size (`int`):
+ Batch size.
+ """
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ causal_mask = attention_mask
+ else:
+ min_dtype = torch.finfo(dtype).min
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+ )
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+
+ return causal_mask
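+ # Editorial note (illustrative only): for batch_size = 1, sequence_length = 2, target_length = 4
+ # and cache_position = torch.tensor([2, 3]), the helper above returns a (1, 1, 2, 4) tensor whose
+ # allowed positions hold 0.0 and whose masked positions hold torch.finfo(dtype).min: the query at
+ # cache position 2 may attend to key positions 0..2, the query at position 3 to key positions
+ # 0..3, further restricted by any padding encoded in `attention_mask`.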
+
+
+MT5_START_DOCSTRING = r"""
+
+ The MT5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text
+ Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan
+ Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder-decoder transformer pre-trained in a
+ text-to-text denoising generative setting.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MT5Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MT5_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you
+ should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ To know more about how to prepare `input_ids` for pretraining, take a look at [MT5 Training](./mt5#training).
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
+
+ To know more about how to prepare `decoder_input_ids` for pretraining, take a look at [MT5
+ Training](./mt5#training).
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
+ `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+MT5_ENCODER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you
+ should be able to pad the inputs on both the right and the left.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ To know more about how to prepare `input_ids` for pretraining, take a look at [MT5 Training](./mt5#training).
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+__HEAD_MASK_WARNING_MSG = """
+The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
+`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
+If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
+num_heads)`.
+"""
+
+
+@add_start_docstrings(
+ "The bare MT5 Model transformer outputting raw hidden-states without any specific head on top.",
+ MT5_START_DOCSTRING,
+)
+class MT5Model(MT5PreTrainedModel):
+ r"""
+ Examples:
+
+ ```python
+ >>> from transformers import MT5Model, AutoTokenizer
+
+ >>> model = MT5Model.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> summary = "Weiter Verhandlung in Syrien."
+ >>> inputs = tokenizer(article, return_tensors="pt")
+ >>> labels = tokenizer(text_target=summary, return_tensors="pt")
+
+ >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"])
+ >>> hidden_states = outputs.last_hidden_state
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+ _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ # Copied from transformers.models.t5.modeling_t5.T5Model.__init__ with T5->MT5
+ def __init__(self, config: MT5Config):
+ super().__init__(config)
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = MT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = MT5Stack(decoder_config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
+ # Copied from transformers.models.t5.modeling_t5.T5Model.parallelize
+ def parallelize(self, device_map=None):
+ warnings.warn(
+ "`T5Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model"
+ " with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0':"
+ " 0, 'encoder.block.1': 1, ...}",
+ FutureWarning,
+ )
+ self.device_map = (
+ get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
+ if device_map is None
+ else device_map
+ )
+ assert_device_map(self.device_map, len(self.encoder.block))
+ self.encoder.parallelize(self.device_map)
+ self.decoder.parallelize(self.device_map)
+ self.model_parallel = True
+
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
+ # Copied from transformers.models.t5.modeling_t5.T5Model.deparallelize
+ def deparallelize(self):
+ warnings.warn(
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
+ FutureWarning,
+ )
+ self.encoder.deparallelize()
+ self.decoder.deparallelize()
+ self.encoder = self.encoder.to("cpu")
+ self.decoder = self.decoder.to("cpu")
+ self.model_parallel = False
+ self.device_map = None
+ torch.cuda.empty_cache()
+
+ # Copied from transformers.models.t5.modeling_t5.T5Model.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.shared
+
+ # Copied from transformers.models.t5.modeling_t5.T5Model.set_input_embeddings
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ # Copied from transformers.models.t5.modeling_t5.T5Model.get_encoder
+ def get_encoder(self):
+ return self.encoder
+
+ # Copied from transformers.models.t5.modeling_t5.T5Model.get_decoder
+ def get_decoder(self):
+ return self.decoder
+
+ # Copied from transformers.models.t5.modeling_t5.T5Model._prune_heads
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ # Copied from transformers.models.t5.modeling_t5.T5Model.forward with google-t5/->google/, T5->MT5, t5->mt5
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MT5Model
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> model = MT5Model.from_pretrained("google/mt5-small")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+
+ >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for MT5Model.
+ >>> # This is not needed for torch's MT5ForConditionalGeneration as it does this internally using labels arg.
+ >>> decoder_input_ids = model._shift_right(decoder_input_ids)
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ # Set device for model parallelism
+ if self.model_parallel:
+ torch.cuda.set_device(self.decoder.first_device)
+ hidden_states = hidden_states.to(self.decoder.first_device)
+ if decoder_input_ids is not None:
+ decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
+ if attention_mask is not None:
+ attention_mask = attention_mask.to(self.decoder.first_device)
+ if decoder_attention_mask is not None:
+ decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=past_key_values,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings("""MT5 Model with a `language modeling` head on top.""", MT5_START_DOCSTRING)
+class MT5ForConditionalGeneration(MT5PreTrainedModel, GenerationMixin):
+ r"""
+ Examples:
+
+ ```python
+ >>> from transformers import MT5ForConditionalGeneration, AutoTokenizer
+
+ >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> summary = "Weiter Verhandlung in Syrien."
+ >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> loss = outputs.loss
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+ _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.__init__ with T5->MT5
+ def __init__(self, config: MT5Config):
+ super().__init__(config)
+ self.model_dim = config.d_model
+
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = MT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = MT5Stack(decoder_config, self.shared)
+
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.parallelize
+ def parallelize(self, device_map=None):
+ warnings.warn(
+ "`T5ForConditionalGeneration.parallelize` is deprecated and will be removed in v5 of Transformers, you"
+ " should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also"
+ " provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance"
+ " {'encoder.block.0': 0, 'encoder.block.1': 1, ...}",
+ FutureWarning,
+ )
+ self.device_map = (
+ get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
+ if device_map is None
+ else device_map
+ )
+ assert_device_map(self.device_map, len(self.encoder.block))
+ self.encoder.parallelize(self.device_map)
+ self.decoder.parallelize(self.device_map)
+ self.lm_head = self.lm_head.to(self.decoder.first_device)
+ self.model_parallel = True
+
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.deparallelize
+ def deparallelize(self):
+ warnings.warn(
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
+ FutureWarning,
+ )
+ self.encoder.deparallelize()
+ self.decoder.deparallelize()
+ self.encoder = self.encoder.to("cpu")
+ self.decoder = self.decoder.to("cpu")
+ self.lm_head = self.lm_head.to("cpu")
+ self.model_parallel = False
+ self.device_map = None
+ torch.cuda.empty_cache()
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.shared
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_input_embeddings
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_encoder
+ def get_encoder(self):
+ return self.encoder
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_decoder
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.forward with google-t5/->google/, T5->MT5, t5->mt5
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
+            labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MT5ForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")
+
+ >>> # training
+ >>> input_ids = tokenizer("The walks in park", return_tensors="pt").input_ids
+ >>> labels = tokenizer(" cute dog the ", return_tensors="pt").input_ids
+ >>> outputs = model(input_ids=input_ids, labels=labels)
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+
+ >>> # inference
+ >>> input_ids = tokenizer(
+ ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> outputs = model.generate(input_ids)
+ >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ >>> # studies have shown that owning a dog is good for you.
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ # Convert encoder inputs in embeddings if needed
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if self.model_parallel:
+ torch.cuda.set_device(self.decoder.first_device)
+
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+ # get decoder inputs from shifting lm labels to the right
+ decoder_input_ids = self._shift_right(labels)
+
+ # Set device for model parallelism
+ if self.model_parallel:
+ torch.cuda.set_device(self.decoder.first_device)
+ hidden_states = hidden_states.to(self.decoder.first_device)
+ if decoder_input_ids is not None:
+ decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
+ if attention_mask is not None:
+ attention_mask = attention_mask.to(self.decoder.first_device)
+ if decoder_attention_mask is not None:
+ decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=past_key_values,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ # Set device for model parallelism
+ if self.model_parallel:
+ torch.cuda.set_device(self.encoder.first_device)
+ self.lm_head = self.lm_head.to(self.encoder.first_device)
+ sequence_output = sequence_output.to(self.lm_head.weight.device)
+
+ if self.config.tie_word_embeddings:
+ # Rescale output before projecting on vocab
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+ sequence_output = sequence_output * (self.model_dim**-0.5)
+
+ lm_logits = self.lm_head(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
+ # move labels to correct device to enable PP
+ labels = labels.to(lm_logits.device)
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
+ # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
+
+ if not return_dict:
+ output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_decoder_input_ids_from_labels
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration._reorder_cache
+ def _reorder_cache(self, past_key_values, beam_idx):
+ # if decoder past is not included in output
+ # speedy decoding is disabled and no need to reorder
+ if past_key_values is None:
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
+ return past_key_values
+
+ reordered_decoder_past = ()
+ for layer_past_states in past_key_values:
+ # get the correct batch idx from layer past batch dim
+ # batch dim of `past` is at 2nd position
+ reordered_layer_past_states = ()
+ for layer_past_state in layer_past_states:
+ # need to set correct `past` for each of the four key / value states
+ reordered_layer_past_states = reordered_layer_past_states + (
+ layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
+ )
+
+ if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
+ raise ValueError(
+ f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched"
+ )
+ if len(reordered_layer_past_states) != len(layer_past_states):
+ raise ValueError(
+ f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched"
+ )
+
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
+ return reordered_decoder_past
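+
+    # This hook only matters for beam search with the legacy tuple cache format: a call such as
+    # `model.generate(input_ids, num_beams=4)` (illustrative values) reorders the cached key/value
+    # states so that they follow the beams selected at each decoding step.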
+
+
+@add_start_docstrings(
+ "The bare MT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
+ MT5_START_DOCSTRING,
+)
+class MT5EncoderModel(MT5PreTrainedModel):
+ r"""
+ Examples:
+
+ ```python
+ >>> from transformers import MT5EncoderModel, AutoTokenizer
+
+ >>> model = MT5EncoderModel.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> input_ids = tokenizer(article, return_tensors="pt").input_ids
+ >>> outputs = model(input_ids)
+ >>> hidden_state = outputs.last_hidden_state
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+ _tied_weights_keys = ["encoder.embed_tokens.weight"]
+
+ # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.__init__ with T5->MT5
+ def __init__(self, config: MT5Config):
+ super().__init__(config)
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = MT5Stack(encoder_config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
+ # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.parallelize
+ def parallelize(self, device_map=None):
+ warnings.warn(
+ "`T5EncoderModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
+ " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0,"
+ " 'block.1': 1, ...}",
+ FutureWarning,
+ )
+ self.device_map = (
+ get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
+ if device_map is None
+ else device_map
+ )
+ assert_device_map(self.device_map, len(self.encoder.block))
+ self.encoder.parallelize(self.device_map)
+ self.model_parallel = True
+
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
+ # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.deparallelize
+ def deparallelize(self):
+ warnings.warn(
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
+ FutureWarning,
+ )
+ self.encoder.deparallelize()
+ self.encoder = self.encoder.to("cpu")
+ self.model_parallel = False
+ self.device_map = None
+ torch.cuda.empty_cache()
+
+ # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.shared
+
+ # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.set_input_embeddings
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+
+ # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_encoder
+ def get_encoder(self):
+ return self.encoder
+
+ # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._prune_heads
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
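+
+    # Illustrative sketch of the public entry point for this hook: `model.prune_heads({0: [0, 2]})`
+    # would remove attention heads 0 and 2 of the first encoder block (head indices chosen
+    # arbitrarily here).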
+
+ @add_start_docstrings_to_model_forward(MT5_ENCODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.forward with google-t5/->google/, T5->MT5, t5->mt5
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MT5EncoderModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> model = MT5EncoderModel.from_pretrained("google/mt5-small")
+ >>> input_ids = tokenizer(
+        ...     "Studies have shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return encoder_outputs
+
+
+@add_start_docstrings(
+ """
+    MT5 model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE
+ tasks.
+ """,
+ MT5_START_DOCSTRING,
+)
+class MT5ForSequenceClassification(MT5PreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.__init__ with T5->MT5
+ def __init__(self, config: MT5Config):
+ super().__init__(config)
+ self.transformer = MT5Model(config)
+ self.classification_head = MT5ClassificationHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.model_parallel = False
+
+ @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.forward
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ Returns:
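+
+        Example (a minimal sketch; the base `google/mt5-small` checkpoint has no trained
+        classification head, so the head used below is randomly initialized and the predicted
+        label is only illustrative):
+
+        ```python
+        >>> import torch
+        >>> from transformers import AutoTokenizer, MT5ForSequenceClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+        >>> model = MT5ForSequenceClassification.from_pretrained("google/mt5-small", num_labels=2)
+
+        >>> inputs = tokenizer("UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.", return_tensors="pt")
+        >>> with torch.no_grad():
+        ...     logits = model(**inputs).logits
+        >>> predicted_class_id = int(logits.argmax(dim=-1))
+        ```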
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ if input_ids is None and inputs_embeds is not None:
+ raise NotImplementedError(
+ f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
+ )
+
+        # Copied from models.bart.modeling_bart.BartModel.forward
+        # different to other models, T5 automatically creates decoder_input_ids from
+        # input_ids if no decoder_input_ids are provided
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ if input_ids is None:
+ raise ValueError(
+ "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
+ "passed, `input_ids` cannot be `None`. Please pass either "
+ "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
+ )
+ decoder_input_ids = self._shift_right(input_ids)
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+
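+        # Pool a single sentence representation from the decoder output: select the hidden states at
+        # the EOS positions of each sequence and keep the last one per example.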
+ eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device)
+
+ if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
+ raise ValueError("All examples must have the same number of tokens.")
+ batch_size, _, hidden_size = sequence_output.shape
+ sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :]
+ logits = self.classification_head(sentence_representation)
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.config.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.config.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MT5 Encoder Model with a token classification head on top (a linear layer on top of the hidden-states output)
+ e.g. for Named-Entity-Recognition (NER) tasks.
+ """,
+ MT5_START_DOCSTRING,
+)
+class MT5ForTokenClassification(MT5PreTrainedModel):
+ _tied_weights_keys = ["transformer.encoder.embed_tokens.weight"]
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.__init__ with T5->MT5
+ def __init__(self, config: MT5Config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.transformer = MT5EncoderModel(config)
+ self.dropout = nn.Dropout(config.classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.forward with T5->MT5
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ Returns:
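+
+        Example (a minimal sketch; the base `google/mt5-small` checkpoint has no trained token
+        classification head, so the logits below come from a randomly initialized classifier):
+
+        ```python
+        >>> from transformers import AutoTokenizer, MT5ForTokenClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+        >>> model = MT5ForTokenClassification.from_pretrained("google/mt5-small", num_labels=3)
+
+        >>> inputs = tokenizer("UN Offizier sagt, dass weiter verhandelt werden muss in Syrien.", return_tensors="pt")
+        >>> logits = model(**inputs).logits  # shape: (batch_size, sequence_length, num_labels)
+        ```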
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states)
+ logits = self.classifier(hidden_states)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+            # keep any optional hidden states / attentions from the encoder tuple output
+            output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MT5 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers
+ on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MT5_START_DOCSTRING,
+)
+class MT5ForQuestionAnswering(MT5PreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.__init__ with T5->MT5
+ def __init__(self, config: MT5Config):
+ super().__init__(config)
+ self.model_dim = config.d_model
+
+ self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_decoder = False
+ encoder_config.use_cache = False
+ encoder_config.is_encoder_decoder = False
+ self.encoder = MT5Stack(encoder_config, self.shared)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ decoder_config.num_layers = config.num_decoder_layers
+ self.decoder = MT5Stack(decoder_config, self.shared)
+
+ self.num_labels = config.num_labels
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.model_parallel = False
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.shared
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.set_input_embeddings
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.set_input_embeddings(new_embeddings)
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_encoder
+ def get_encoder(self):
+ return self.encoder
+
+ # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_decoder
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(MT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
+ # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.forward
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ Returns:
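+
+        Example (a minimal sketch; the base `google/mt5-small` checkpoint has no trained
+        question-answering head, so the extracted span is not a meaningful answer and the snippet
+        only illustrates the expected inputs and outputs):
+
+        ```python
+        >>> from transformers import AutoTokenizer, MT5ForQuestionAnswering
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+        >>> model = MT5ForQuestionAnswering.from_pretrained("google/mt5-small")
+
+        >>> question, context = "Who lives in Berlin?", "Tim lives in Berlin."
+        >>> inputs = tokenizer(question, context, return_tensors="pt")
+        >>> outputs = model(**inputs)
+        >>> start_index = int(outputs.start_logits.argmax(dim=-1))
+        >>> end_index = int(outputs.end_logits.argmax(dim=-1))
+        ```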
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ if start_positions is not None and end_positions is not None:
+ use_cache = False
+
+ # Copied from models.bart.modeling_bart.BartModel.forward
+ # different to other models, T5 automatically creates decoder_input_ids from
+ # input_ids if no decoder_input_ids are provided
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ if input_ids is None:
+ raise ValueError(
+ "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
+ "passed, `input_ids` cannot be `None`. Please pass either "
+ "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
+ )
+ decoder_input_ids = self._shift_right(input_ids)
+
+ # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
+ if head_mask is not None and decoder_head_mask is None:
+ if self.config.num_layers == self.config.num_decoder_layers:
+ warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
+ decoder_head_mask = head_mask
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ past_key_values=None,
+ encoder_hidden_states=hidden_states,
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = decoder_outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, an extra dimension may have been added; squeeze it away
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return Seq2SeqQuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+__all__ = [
+ "MT5EncoderModel",
+ "MT5ForConditionalGeneration",
+ "MT5ForQuestionAnswering",
+ "MT5ForSequenceClassification",
+ "MT5ForTokenClassification",
+ "MT5Model",
+ "MT5PreTrainedModel",
+ "MT5Stack",
+]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_tf_mt5.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_tf_mt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..6152aea0a5acad5d1d6db9b7ee044cbde2bca976
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/modeling_tf_mt5.py
@@ -0,0 +1,98 @@
+# coding=utf-8
+# Copyright 2020 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tensorflow mT5 model."""
+
+from ...utils import logging
+from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
+from .configuration_mt5 import MT5Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "T5Config"
+
+
+class TFMT5Model(TFT5Model):
+ r"""
+ This class overrides [`TFT5Model`]. Please check the superclass for the appropriate documentation alongside usage
+ examples.
+
+ Examples:
+
+ ```python
+ >>> from transformers import TFMT5Model, AutoTokenizer
+
+ >>> model = TFMT5Model.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> summary = "Weiter Verhandlung in Syrien."
+ >>> inputs = tokenizer(article, return_tensors="tf")
+ >>> labels = tokenizer(text_target=summary, return_tensors="tf")
+
+ >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"])
+ >>> hidden_states = outputs.last_hidden_state
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+
+
+class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
+ r"""
+ This class overrides [`TFT5ForConditionalGeneration`]. Please check the superclass for the appropriate
+ documentation alongside usage examples.
+
+ Examples:
+
+ ```python
+ >>> from transformers import TFMT5ForConditionalGeneration, AutoTokenizer
+
+ >>> model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> summary = "Weiter Verhandlung in Syrien."
+ >>> inputs = tokenizer(article, text_target=summary, return_tensors="tf")
+
+ >>> outputs = model(**inputs)
+ >>> loss = outputs.loss
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+
+
+class TFMT5EncoderModel(TFT5EncoderModel):
+ r"""
+ This class overrides [`TFT5EncoderModel`]. Please check the superclass for the appropriate documentation alongside
+ usage examples.
+
+ Examples:
+
+ ```python
+ >>> from transformers import TFMT5EncoderModel, AutoTokenizer
+
+ >>> model = TFMT5EncoderModel.from_pretrained("google/mt5-small")
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
+ >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
+ >>> input_ids = tokenizer(article, return_tensors="tf").input_ids
+ >>> outputs = model(input_ids)
+ >>> hidden_state = outputs.last_hidden_state
+ ```"""
+
+ model_type = "mt5"
+ config_class = MT5Config
+
+
+__all__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/tokenization_mt5.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/tokenization_mt5.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3058816ff2032c41e2b0cdb6940705a7572faa0
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/tokenization_mt5.py
@@ -0,0 +1,24 @@
+# coding=utf-8
+# Copyright 2020, The T5 Authors and HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""mT5 tokenization file"""
+
+from ..t5 import T5Tokenizer
+
+
+class MT5Tokenizer(T5Tokenizer):
+ pass
+
+
+__all__ = ["MT5Tokenizer"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/tokenization_mt5_fast.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/tokenization_mt5_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..8737088cc44206bea1e2f9d1793ae49692ae35e8
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/mt5/tokenization_mt5_fast.py
@@ -0,0 +1,24 @@
+# coding=utf-8
+# Copyright 2020, The T5 Authors and HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""mT5 tokenization file"""
+
+from ..t5 import T5TokenizerFast
+
+
+class MT5TokenizerFast(T5TokenizerFast):
+ pass
+
+
+__all__ = ["MT5TokenizerFast"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..889ce840e2e5dd2730794873ea48a1a02a6ba3d7
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f248317fce815434eb7ae1029cb3a8cd3c5bd8d
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/convert_openai_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/convert_openai_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7eed09cef76a85d7bca134680f1bd1c6e104a89
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/convert_openai_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5cd6088aaec84a5a0cb2151efdfeafde0412a0ad
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94019f457b1587445a14bfa5342f502620248e54
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e41ccb804e54e6eaba23dcd2b944f3296671ad49
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..494965814735b8aad35f535d96e242325fc9f724
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/configuration_openai.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/configuration_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4f2fae9d304bc63b6e69a3e456141531a644bfd
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/configuration_openai.py
@@ -0,0 +1,156 @@
+# coding=utf-8
+# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""OpenAI GPT configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class OpenAIGPTConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`OpenAIGPTModel`] or a [`TFOpenAIGPTModel`]. It is
+ used to instantiate a GPT model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT
+ [openai-community/openai-gpt](https://huggingface.co/openai-community/openai-gpt) architecture from OpenAI.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 40478):
+            Vocabulary size of the GPT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`OpenAIGPTModel`] or [`TFOpenAIGPTModel`].
+ n_positions (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ n_embd (`int`, *optional*, defaults to 768):
+ Dimensionality of the embeddings and hidden states.
+ n_layer (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ afn (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
+ The dropout ratio for the embeddings.
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+ The epsilon to use in the layer normalization layers
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ summary_type (`str`, *optional*, defaults to `"cls_index"`):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+            [`TFOpenAIGPTDoubleHeadsModel`].
+
+ Has to be one of the following options:
+
+ - `"last"`: Take the last token hidden state (like XLNet).
+ - `"first"`: Take the first token hidden state (like BERT).
+ - `"mean"`: Take the mean of all tokens hidden states.
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
+ - `"attn"`: Not implemented now, use multi-head attention.
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+            [`TFOpenAIGPTDoubleHeadsModel`].
+
+ Whether or not to add a projection after the vector extraction.
+ summary_activation (`str`, *optional*):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+            [`TFOpenAIGPTDoubleHeadsModel`].
+
+ Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
+ summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+            [`TFOpenAIGPTDoubleHeadsModel`].
+
+ Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
+ summary_first_dropout (`float`, *optional*, defaults to 0.1):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+            [`TFOpenAIGPTDoubleHeadsModel`].
+
+ The dropout ratio to be used after the projection and activation.
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import OpenAIGPTConfig, OpenAIGPTModel
+
+ >>> # Initializing a GPT configuration
+ >>> configuration = OpenAIGPTConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = OpenAIGPTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "openai-gpt"
+ attribute_map = {
+ "max_position_embeddings": "n_positions",
+ "hidden_size": "n_embd",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=40478,
+ n_positions=512,
+ n_embd=768,
+ n_layer=12,
+ n_head=12,
+ afn="gelu",
+ resid_pdrop=0.1,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ summary_type="cls_index",
+ summary_use_proj=True,
+ summary_activation=None,
+ summary_proj_to_labels=True,
+ summary_first_dropout=0.1,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.n_positions = n_positions
+ self.n_embd = n_embd
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.afn = afn
+ self.resid_pdrop = resid_pdrop
+ self.embd_pdrop = embd_pdrop
+ self.attn_pdrop = attn_pdrop
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_range = initializer_range
+ self.summary_type = summary_type
+ self.summary_use_proj = summary_use_proj
+ self.summary_activation = summary_activation
+ self.summary_first_dropout = summary_first_dropout
+ self.summary_proj_to_labels = summary_proj_to_labels
+ super().__init__(**kwargs)
+
+
+__all__ = ["OpenAIGPTConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d5218c204262f639a0b862c4106a3a04dc27d0b
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,74 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert OpenAI GPT checkpoint."""
+
+import argparse
+
+import torch
+
+from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
+from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
+ # Construct model
+ if openai_config_file == "":
+ config = OpenAIGPTConfig()
+ else:
+ config = OpenAIGPTConfig.from_json_file(openai_config_file)
+ model = OpenAIGPTModel(config)
+
+ # Load weights from numpy
+ load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
+
+ # Save pytorch-model
+ pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
+ print(f"Save PyTorch model to {pytorch_weights_dump_path}")
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
+ print(f"Save configuration file to {pytorch_config_dump_path}")
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--openai_checkpoint_folder_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the TensorFlow checkpoint path.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--openai_config_file",
+ default="",
+ type=str,
+ help=(
+ "An optional config json file corresponding to the pre-trained OpenAI model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ args = parser.parse_args()
+ convert_openai_checkpoint_to_pytorch(
+ args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
+ )
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/modeling_openai.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/modeling_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..156f778e1ce6253fe7b1f0401114cf68c7e3d263
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/openai/modeling_openai.py
@@ -0,0 +1,867 @@
+# coding=utf-8
+# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch OpenAI GPT model."""
+
+import json
+import math
+import os
+from dataclasses import dataclass
+from typing import Any, Dict, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import gelu_new, silu
+from ...generation import GenerationMixin
+from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
+from ...modeling_utils import PreTrainedModel, SequenceSummary
+from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_openai import OpenAIGPTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "openai-community/openai-gpt"
+_CONFIG_FOR_DOC = "OpenAIGPTConfig"
+
+
+def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
+ """Load tf pre-trained weights in a pytorch model (from NumPy arrays here)"""
+ import re
+
+ import numpy as np
+
+ if ".ckpt" in openai_checkpoint_folder_path:
+ openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
+
+ logger.info(f"Loading weights from {openai_checkpoint_folder_path}")
+
+ with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
+ names = json.load(names_handle)
+ with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
+ shapes = json.load(shapes_handle)
+ offsets = np.cumsum([np.prod(shape) for shape in shapes])
+ init_params = [np.load(openai_checkpoint_folder_path + f"/params_{n}.npy") for n in range(10)]
+ init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
+ init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
+
+ # This was used when we had a single embedding matrix for positions and tokens
+ # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
+ # del init_params[1]
+ init_params = [arr.squeeze() for arr in init_params]
+
+    # Check that the token and position embeddings weight dimensions match those of the init parameters.
+ if model.tokens_embed.weight.shape != init_params[1].shape:
+ raise ValueError(
+ f"tokens_embed.weight.shape: {model.tokens_embed.weight.shape} does not match init_param[1].shape:"
+ f" {init_params[1].shape}"
+ )
+
+ if model.positions_embed.weight.shape != init_params[0].shape:
+ raise ValueError(
+ f"positions_embed.weight.shape: {model.positions_embed.weight.shape} does not match init_param[0].shape:"
+ f" {init_params[0].shape}"
+ )
+
+ model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
+ model.positions_embed.weight.data = torch.from_numpy(init_params[0])
+ names.pop(0)
+ # Pop position and token embedding arrays
+ init_params.pop(0)
+ init_params.pop(0)
+
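+    # Each remaining TF variable name (of the form "model/<scope>/.../w:0") is stripped of its
+    # "model/" prefix and ":0" suffix, then walked segment by segment to find the matching PyTorch
+    # sub-module, mapping "g"/"w" to `.weight` and "b" to `.bias` before copying the array over.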
+ for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
+ name = name[6:] # skip "model/"
+ if name[-2:] != ":0":
+ raise ValueError(f"Layer {name} does not end with :0")
+ name = name[:-2]
+ name = name.split("/")
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
+ scope_names = re.split(r"(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "g":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "b":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "w":
+ pointer = getattr(pointer, "weight")
+ else:
+ pointer = getattr(pointer, scope_names[0])
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+
+ # Ensure that the pointer and array have compatible shapes.
+ if pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+ACT_FNS = {"relu": nn.ReLU(), "silu": silu, "gelu": gelu_new, "swish": silu}
+
+
+class Attention(nn.Module):
+ def __init__(self, nx, n_positions, config, scale=False):
+ super().__init__()
+ n_state = nx # in Attention: n_state=768 (nx=n_embd)
+ # [switch nx => n_state from Block to Attention to keep identical to TF implementation]
+ if n_state % config.n_head != 0:
+ raise ValueError(f"Attention n_state shape: {n_state} must be divisible by config.n_head {config.n_head}")
+ self.register_buffer(
+ "bias",
+ torch.tril(torch.ones(n_positions, n_positions)).view(1, 1, n_positions, n_positions),
+ persistent=False,
+ )
+ self.n_head = config.n_head
+ self.split_size = n_state
+ self.scale = scale
+
+ self.c_attn = Conv1D(n_state * 3, nx)
+ self.c_proj = Conv1D(n_state, nx)
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
+ )
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
+ # Prune conv1d layers
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
+ # Update hyper params
+ self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
+ self.n_head = self.n_head - len(heads)
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
+ w = torch.matmul(q, k)
+ if self.scale:
+ w = w / math.sqrt(v.size(-1))
+ # w = w * self.bias + -1e9 * (1 - self.bias) # TF implementation method: mask_attn_weights
+ # XD: self.b may be larger than w, so we need to crop it
+ b = self.bias[:, :, : w.size(-2), : w.size(-1)]
+ w = w * b + -1e4 * (1 - b)
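+ # Positions above the diagonal get a large negative score, so softmax assigns them ~0 weight.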
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ w = w + attention_mask
+
+ w = nn.functional.softmax(w, dim=-1)
+ w = self.attn_dropout(w)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ w = w * head_mask
+
+ outputs = [torch.matmul(w, v)]
+ if output_attentions:
+ outputs.append(w)
+ return outputs
+
+ def merge_heads(self, x):
+ x = x.permute(0, 2, 1, 3).contiguous()
+ new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
+ return x.view(*new_x_shape) # in Tensorflow implementation: fct merge_states
+
+ def split_heads(self, x, k=False):
+ new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
+ x = x.view(*new_x_shape) # in Tensorflow implementation: fct split_states
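+ # Keys are returned as (batch, head, head_dim, seq) so that `torch.matmul(q, k)` in `_attn`
+ # computes q @ k^T without an extra transpose.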
+ if k:
+ return x.permute(0, 2, 3, 1)
+ else:
+ return x.permute(0, 2, 1, 3)
+
+ def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
+ x = self.c_attn(x)
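+ # A single projection produces the concatenated query/key/value of size 3 * n_state,
+ # which is then split along the feature dimension below.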
+ query, key, value = x.split(self.split_size, dim=2)
+ query = self.split_heads(query)
+ key = self.split_heads(key, k=True)
+ value = self.split_heads(value)
+
+ attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
+ a = attn_outputs[0]
+
+ a = self.merge_heads(a)
+ a = self.c_proj(a)
+ a = self.resid_dropout(a)
+
+ outputs = [a] + attn_outputs[1:]
+ return outputs # a, (attentions)
+
+
+class MLP(nn.Module):
+ def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
+ super().__init__()
+ nx = config.n_embd
+ self.c_fc = Conv1D(n_state, nx)
+ self.c_proj = Conv1D(nx, n_state)
+ self.act = ACT_FNS[config.afn]
+ self.dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(self, x):
+ h = self.act(self.c_fc(x))
+ h2 = self.c_proj(h)
+ return self.dropout(h2)
+
+
+class Block(nn.Module):
+ def __init__(self, n_positions, config, scale=False):
+ super().__init__()
+ nx = config.n_embd
+ self.attn = Attention(nx, n_positions, config, scale)
+ self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
+ self.mlp = MLP(4 * nx, config)
+ self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
+
+ def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
+ attn_outputs = self.attn(
+ x,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ a = attn_outputs[0]
+
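+ # Post-layernorm residual blocks, as in the original GPT: normalize after adding each
+ # sub-layer's output (GPT-2 later moved to pre-layernorm).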
+ n = self.ln_1(x + a)
+ m = self.mlp(n)
+ h = self.ln_2(n + m)
+
+ outputs = [h] + attn_outputs[1:]
+ return outputs
+
+
+class OpenAIGPTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = OpenAIGPTConfig
+ load_tf_weights = load_tf_weights_in_openai_gpt
+ base_model_prefix = "transformer"
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, Conv1D)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@dataclass
+class OpenAIGPTDoubleHeadsModelOutput(ModelOutput):
+ """
+ Base class for outputs of models with a language modeling head and a multiple-choice classification head on top.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
+ Multiple choice classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ mc_loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mc_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+OPENAI_GPT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`OpenAIGPTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+OPENAI_GPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
+ OPENAI_GPT_START_DOCSTRING,
+)
+class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
+ self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
+ self.drop = nn.Dropout(config.embd_pdrop)
+ self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)])
+
+ self.register_buffer("position_ids", torch.arange(config.n_positions), persistent=False)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.tokens_embed
+
+ def set_input_embeddings(self, new_embeddings):
+ self.tokens_embed = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
+ """
+ for layer, heads in heads_to_prune.items():
+ self.h[layer].attn.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if position_ids is None:
+ # Code is different from when we had a single embedding matrix for position and token embeddings
+ position_ids = self.position_ids[None, : input_shape[-1]]
+
+ # Attention mask.
+ if attention_mask is not None:
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is simpler than the triangular masking of causal attention
+ # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and the dtype's smallest value for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.tokens_embed(input_ids)
+ position_embeds = self.positions_embed(position_ids)
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
+ token_type_embeds = self.tokens_embed(token_type_ids)
+ else:
+ token_type_embeds = 0
+ hidden_states = inputs_embeds + position_embeds + token_type_embeds
+ hidden_states = self.drop(hidden_states)
+
+ output_shape = input_shape + (hidden_states.size(-1),)
+
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, block in enumerate(self.h):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions)
+ hidden_states = outputs[0]
+ if output_attentions:
+ all_attentions = all_attentions + (outputs[1],)
+
+ hidden_states = hidden_states.view(*output_shape)
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
+ embeddings).
+ """,
+ OPENAI_GPT_START_DOCSTRING,
+)
+class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = OpenAIGPTModel(config)
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ lm_logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutput(
+ loss=loss,
+ logits=lm_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]:
+ # Overwritten -- old model with reduced inputs
+ return {"input_ids": input_ids}
+
+
+@add_start_docstrings(
+ """
+OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top, e.g. for
+RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
+input embeddings; the classification head takes as input the hidden state of a specified classification token index
+in the input sequence.
+""",
+ OPENAI_GPT_START_DOCSTRING,
+)
+class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ config.num_labels = 1
+ self.transformer = OpenAIGPTModel(config)
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+ self.multiple_choice_head = SequenceSummary(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=OpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ mc_token_ids: Optional[torch.LongTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ mc_labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], OpenAIGPTDoubleHeadsModelOutput]:
+ r"""
+ mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, defaults to the index of the last token of the input):
+ Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
+ 1]`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are
+ ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
+ where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
+
+ Return:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, OpenAIGPTDoubleHeadsModel
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
+ >>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt")
+ >>> tokenizer.add_special_tokens(
+ ... {"cls_token": "[CLS]"}
+ ... ) # Add a [CLS] to the vocabulary (we should train it also!)
+ >>> model.resize_token_embeddings(len(tokenizer))
+
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
+ >>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
+ >>> mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0) # Batch size 1
+
+ >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
+ >>> lm_logits = outputs.logits
+ >>> mc_logits = outputs.mc_logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+
+ lm_logits = self.lm_head(hidden_states)
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
+
+ lm_loss, mc_loss = None, None
+ if mc_labels is not None:
+ loss_fct = CrossEntropyLoss()
+ mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
+ if labels is not None:
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits, mc_logits) + transformer_outputs[1:]
+ if mc_loss is not None:
+ output = (mc_loss,) + output
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return OpenAIGPTDoubleHeadsModelOutput(
+ loss=lm_loss,
+ mc_loss=mc_loss,
+ logits=lm_logits,
+ mc_logits=mc_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
+ [`OpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal
+ models (e.g. GPT-2) do. Since it does classification on the last token, it needs to know the position of the
+ last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding
+ token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since
+ it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take
+ the last value in each row of the batch).
+ """,
+ OPENAI_GPT_START_DOCSTRING,
+)
+class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = OpenAIGPTModel(config)
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size, sequence_length = input_ids.shape[:2]
+ else:
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ # A padding token must be defined to handle batch sizes greater than 1.
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
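+ # When no pad token is present, argmax returns 0, so the index becomes -1 and the modulo
+ # above maps it back to the last position in the sequence.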
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+ logger.warning_once(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+
+ pooled_logits = logits[range(batch_size), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=pooled_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+__all__ = [
+ "OpenAIGPTDoubleHeadsModel",
+ "OpenAIGPTForSequenceClassification",
+ "OpenAIGPTLMHeadModel",
+ "OpenAIGPTModel",
+ "OpenAIGPTPreTrainedModel",
+ "load_tf_weights_in_openai_gpt",
+]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..400d21c6c7591e104b2a1d5c4c27d5de183dab1f
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96dacf4fabd910150adbded4a6b727c811b275ec
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84d79e4254681aa755ab70c63097fe6306f3c605
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a17e519597f9104199c3e142fb23c650838e8df7
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e074caacca7b8b43a9edafb52ded60e7c4ee8d52
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/configuration_prophetnet.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/configuration_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..1219e1faacd42b57402ffab352924c03c3b19623
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/configuration_prophetnet.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""ProphetNet model configuration"""
+
+from typing import Callable, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ProphetNetConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ProphetNetModel`]. It is used to instantiate a
+ ProphetNet model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the ProphetNet
+ [microsoft/prophetnet-large-uncased](https://huggingface.co/microsoft/prophetnet-large-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for activations inside the fully connected layer.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the ProphetNet model. Defines the number of different tokens that can be represented by
+ the `input_ids` passed when calling [`ProphetNetModel`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ num_encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ num_encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the `intermediate` (often named feed-forward) layer in the decoder.
+ num_decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ num_decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ add_cross_attention (`bool`, *optional*, defaults to `True`):
+ Whether cross-attention layers should be added to the model.
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether this is an encoder/decoder model.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ ngram (`int`, *optional*, defaults to 2):
+ Number of future tokens to predict. Set to 1 to behave like a traditional language model that predicts only
+ the next token.
+ num_buckets (`int`, *optional*, defaults to 32):
+ The number of buckets to use for each attention layer. This is for relative position calculation. See the
+ [T5 paper](https://arxiv.org/abs/1910.10683) for more details.
+ relative_max_distance (`int`, *optional*, defaults to 128):
+ Relative distances greater than this number will be put into the same last bucket. This is for relative
+ position calculation. See the [T5 paper](https://arxiv.org/abs/1910.10683) for more details.
+ disable_ngram_loss (`bool`, *optional*, defaults to `False`):
+ Whether to compute the loss only on the next token instead of on all `ngram` predicted tokens.
+ eps (`float`, *optional*, defaults to 0.0):
+ Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label
+ smoothing is performed.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
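+
+ Example (a minimal usage sketch; `ProphetNetModel` is assumed to be importable from `transformers`):
+
+ ```python
+ >>> from transformers import ProphetNetConfig, ProphetNetModel
+
+ >>> # Initializing a configuration with the default (prophetnet-large-uncased style) values
+ >>> configuration = ProphetNetConfig()
+
+ >>> # Initializing a model with random weights from that configuration
+ >>> model = ProphetNetModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```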
+ """
+
+ model_type = "prophetnet"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "num_attention_heads": "num_encoder_attention_heads",
+ }
+
+ def __init__(
+ self,
+ activation_dropout: Optional[float] = 0.1,
+ activation_function: Optional[Union[str, Callable]] = "gelu",
+ vocab_size: Optional[int] = 30522,
+ hidden_size: Optional[int] = 1024,
+ encoder_ffn_dim: Optional[int] = 4096,
+ num_encoder_layers: Optional[int] = 12,
+ num_encoder_attention_heads: Optional[int] = 16,
+ decoder_ffn_dim: Optional[int] = 4096,
+ num_decoder_layers: Optional[int] = 12,
+ num_decoder_attention_heads: Optional[int] = 16,
+ attention_dropout: Optional[float] = 0.1,
+ dropout: Optional[float] = 0.1,
+ max_position_embeddings: Optional[int] = 512,
+ init_std: Optional[float] = 0.02,
+ is_encoder_decoder: Optional[bool] = True,
+ add_cross_attention: Optional[bool] = True,
+ decoder_start_token_id: Optional[int] = 0,
+ ngram: Optional[int] = 2,
+ num_buckets: Optional[int] = 32,
+ relative_max_distance: Optional[int] = 128,
+ disable_ngram_loss: Optional[bool] = False,
+ eps: Optional[float] = 0.0,
+ use_cache: Optional[bool] = True,
+ pad_token_id: Optional[int] = 0,
+ bos_token_id: Optional[int] = 1,
+ eos_token_id: Optional[int] = 2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.num_encoder_layers = num_encoder_layers
+ self.num_encoder_attention_heads = num_encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.num_decoder_layers = num_decoder_layers
+ self.num_decoder_attention_heads = num_decoder_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.init_std = init_std # Normal(0, this parameter)
+ self.activation_function = activation_function
+
+ # parameters for prophetnet
+ self.ngram = ngram
+ self.num_buckets = num_buckets
+ self.relative_max_distance = relative_max_distance
+ self.disable_ngram_loss = disable_ngram_loss
+ self.eps = eps
+
+ # 3 Types of Dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.dropout = dropout
+
+ self.use_cache = use_cache
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ add_cross_attention=add_cross_attention,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
+
+ @property
+ def num_hidden_layers(self) -> int:
+ return self.num_encoder_layers + self.num_decoder_layers
+
+ @num_hidden_layers.setter
+ def num_hidden_layers(self, value):
+ raise NotImplementedError(
+ "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
+ " `num_decoder_layers`."
+ )
+
+
+__all__ = ["ProphetNetConfig"]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..30390561169e1c71bcb86275ab16caec0d729e4f
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,159 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ProphetNet checkpoint."""
+
+import argparse
+
+from torch import nn
+
+# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
+# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
+from transformers_old.modeling_prophetnet import (
+ ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
+)
+from transformers_old.modeling_xlm_prophetnet import (
+ XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
+)
+
+from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
+
+
+logger = logging.get_logger(__name__)
+logging.set_verbosity_info()
+
+
+def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
+ """
+ Copy/paste/tweak prophetnet's weights to our prophetnet structure.
+ """
+ if "xprophetnet" in prophetnet_checkpoint_path:
+ prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
+ prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
+ prophetnet_checkpoint_path, output_loading_info=True
+ )
+ else:
+ prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
+ prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
+ prophetnet_checkpoint_path, output_loading_info=True
+ )
+
+ special_keys = ["key_proj", "value_proj", "query_proj"]
+
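+ # Maps attribute names in the new ProphetNet implementation to their counterparts in the
+ # old (fairseq-style) model; an empty string means the level has no counterpart in the old model.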
+ mapping = {
+ "self_attn": "ngram_self_attn",
+ "cross_attn": "encoder_attn",
+ "cross_attn_layer_norm": "encoder_attn_layer_norm",
+ "feed_forward_layer_norm": "final_layer_norm",
+ "feed_forward": "",
+ "intermediate": "fc1",
+ "output": "fc2",
+ "key_proj": "k_proj",
+ "query_proj": "q_proj",
+ "value_proj": "v_proj",
+ "word_embeddings": "embed_tokens",
+ "embeddings_layer_norm": "emb_layer_norm",
+ "relative_pos_embeddings": "relative_linear",
+ "ngram_embeddings": "ngram_input_embed",
+ "position_embeddings": "embed_positions",
+ }
+
+ for key in loading_info["missing_keys"]:
+ attributes = key.split(".")
+
+ if attributes[0] == "lm_head":
+ model = prophet
+ old_model = prophet_old
+ else:
+ model = prophet.prophetnet
+ old_model = prophet_old.model
+
+ is_key_init = False
+ for attribute in attributes:
+ if attribute in mapping:
+ old_attribute = mapping[attribute]
+ if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
+ old_attribute = attribute
+ elif hasattr(old_model, attribute):
+ old_attribute = attribute
+
+ if attribute == "weight":
+ assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
+ model.weight = old_model.weight
+ logger.info(f"{attribute} is initialized.")
+ is_key_init = True
+ break
+ elif attribute == "bias":
+ assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
+ model.bias = old_model.bias
+ logger.info(f"{attribute} is initialized")
+ is_key_init = True
+ break
+ elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
+ embed_dim = old_model.in_proj_weight.shape[0] // 3
+ param = getattr(model, attribute)
+ assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
+ assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
+ if attribute == "query_proj":
+ model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
+ model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
+
+ elif attribute == "key_proj":
+ model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
+ model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
+ elif attribute == "value_proj":
+ model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
+ model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
+ is_key_init = True
+ break
+ elif attribute == "position_embeddings":
+ assert (
+ model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
+ ), "Hidden size has to match"
+ assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
+ model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
+ is_key_init = True
+ break
+
+ if attribute.isdigit():
+ model = model[int(attribute)]
+ old_model = old_model[int(old_attribute)]
+ else:
+ model = getattr(model, attribute)
+
+ if old_attribute == "":
+ old_model = old_model
+ else:
+ if not hasattr(old_model, old_attribute):
+ raise ValueError(f"{old_model} does not have {old_attribute}")
+ old_model = getattr(old_model, old_attribute)
+
+ if not is_key_init:
+ raise ValueError(f"{key} was not correctly initialized!")
+
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ prophet.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
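+ # Example invocation (paths are illustrative):
+ #   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
+ #       --prophetnet_checkpoint_path /path/to/prophetnet_checkpoint \
+ #       --pytorch_dump_folder_path /path/to/output_folder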
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/modeling_prophetnet.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/modeling_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc148edbc49c2913bb5b3e0b128943587fa225bf
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/modeling_prophetnet.py
@@ -0,0 +1,2321 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch ProphetNet model, ported from ProphetNet repo(fairsequery_states version)."""
+
+import copy
+import math
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+from torch.nn import LayerNorm
+
+from ...activations import ACT2FN
+from ...generation import GenerationMixin
+from ...modeling_outputs import BaseModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_prophetnet import ProphetNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "ProphenetConfig"
+_CHECKPOINT_FOR_DOC = "microsoft/prophetnet-large-uncased"
+
+
+PROPHETNET_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ Original ProphetNet code can be found [here](https://github.com/microsoft/ProphetNet). Checkpoints were converted
+ from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the
+ file `convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py`.
+
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ProphetNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+PROPHETNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def softmax(hidden_state, dim, onnx_trace=False):
+ if onnx_trace:
+ return nn.functional.softmax(hidden_state.float(), dim=dim)
+ else:
+ return nn.functional.softmax(hidden_state, dim=dim, dtype=torch.float32)
+
+
+def ngram_attention_bias(sequence_length, ngram, device, dtype):
+ """
+ This function computes the bias for the predict stream
+ """
+ left_block = (
+ torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * torch.finfo(dtype).min
+ )
+ right_block = left_block.detach().clone()
+ # create bias
+ for stream_idx in range(ngram):
+ right_block[stream_idx].fill_diagonal_(0, wrap=False)
+ left_block[stream_idx].triu_(-stream_idx + 1)
+
+ left_block[:, :, 0] = 0
+ return torch.cat([left_block, right_block], dim=2)
+
+
+def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False):
+ """
+ This function computes individual parts of the relative position buckets. For more detail, see paper.
+ """
+ inv_relative_positions = -relative_positions
+ rel_positions_bucket = 0
+
+ if is_bidirectional:
+ num_buckets = num_buckets // 2
+ rel_positions_bucket = (
+ rel_positions_bucket
+ + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets
+ )
+ inv_relative_positions = torch.abs(inv_relative_positions)
+ else:
+ inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions))
+
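+ # Half of the buckets cover exact small distances one-to-one; larger distances share the
+ # remaining buckets on a logarithmic scale up to `max_distance` (the same scheme as T5's relative buckets).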
+ max_exact = num_buckets // 2
+ is_small = torch.lt(inv_relative_positions, max_exact)
+ val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log(
+ max_distance / max_exact
+ ) * (num_buckets - max_exact)
+ val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int()
+ rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large)
+ return rel_positions_bucket
+
+
+def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids):
+ """
+ This function computes both main and predict relative position buckets. For more detail, see paper.
+ """
+ # main stream
+ main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1)
+ main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1)
+
+ # predicting stream
+ predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1)
+ predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1)
+ predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1)
+
+ # get both position buckets
+ main_relative_position_buckets = compute_relative_buckets(
+ num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False
+ )
+ predict_relative_position_buckets = compute_relative_buckets(
+ num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False
+ )
+ return main_relative_position_buckets, predict_relative_position_buckets
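+
+# Rough summary of the helper above: for `position_ids` of shape (batch_size,
+# sequence_length) it returns main-stream buckets of shape (batch_size,
+# sequence_length, sequence_length) and predict-stream buckets of shape
+# (batch_size, sequence_length, 2 * sequence_length), both computed causally.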
+
+
+@dataclass
+class ProphetNetSeq2SeqLMOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence language model outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the main stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, encoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, encoder_sequence_length)`. Attention weights of the encoder, after the attention
+ softmax, used to compute the weighted average in the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def decoder_cross_attentions(self):
+ warnings.warn(
+ "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
+ " instead.",
+ FutureWarning,
+ )
+ return self.cross_attentions
+
+
+@dataclass
+class ProphetNetSeq2SeqModelOutput(ModelOutput):
+ """
+ Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
+ decoding.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
+ Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, encoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, encoder_sequence_length)`.
+
+ Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ last_hidden_state_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def decoder_cross_attentions(self):
+ warnings.warn(
+ "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
+ " instead.",
+ FutureWarning,
+ )
+ return self.cross_attentions
+
+
+@dataclass
+class ProphetNetDecoderModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
+ Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`):
+ Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ last_hidden_state_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class ProphetNetDecoderLMOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the main stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class ProphetNetPreTrainedModel(PreTrainedModel):
+ config_class = ProphetNetConfig
+ base_model_prefix = "prophetnet"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.init_std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.init_std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ assert decoder_start_token_id is not None, (
+ "self.model.config.decoder_start_token_id has to be defined. In ProphetNet it is usually set to the"
+ " pad_token_id. See ProphetNet docs for more information"
+ )
+
+ # shift inputs to the right
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
+
+ return shifted_input_ids
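+
+ # Illustrative example (assuming decoder_start_token_id == pad_token_id == 0):
+ # labels [[5, 6, -100, -100]] become decoder inputs [[0, 5, 6, 0]]; tokens are
+ # shifted one step to the right and any -100 placeholder is replaced by the pad token.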
+
+
+class ProphetNetPositionalEmbeddings(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
+ based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to
+ the forward function.
+ """
+
+ def __init__(self, config: ProphetNetConfig) -> None:
+ self.max_length = config.max_position_embeddings
+ super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
+
+ def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None):
+ assert (position_ids is None) or (
+ self.padding_idx is None
+ ), "If position_ids is pre-computed then padding_idx should not be set."
+
+ if position_ids is None:
+ if past_key_values is not None:
+ # position_ids is the same for every token when decoding a single step
+ # Without the int() cast, it doesn't work in some cases when exporting to ONNX
+ prev_num_input_ids = past_key_values[0][0].shape[2]
+ num_input_ids = inputs_shape[1] + prev_num_input_ids
+ position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * (
+ int(self.padding_idx + num_input_ids)
+ )
+ else:
+ if attention_mask is None:
+ attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device)
+
+ # retrieve position_ids from input_ids / attention_mask
+ position_ids = (
+ torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
+ ).long() + self.padding_idx
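+ # e.g. an attention_mask row [1, 1, 1, 0, 0] gives cumsum * mask = [1, 2, 3, 0, 0],
+ # so real tokens get positions offset past `padding_idx` and padded slots map to
+ # `padding_idx` itself.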
+
+ # make sure position_ids are not bigger than max_length
+ position_ids = position_ids.clamp(0, self.max_length - 1)
+
+ return super().forward(position_ids), position_ids
+
+ def _forward(self, position_ids):
+ return super().forward(position_ids)
+
+
+class ProphetNetAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ config: ProphetNetConfig,
+ num_attn_heads: int,
+ ):
+ super().__init__()
+ hidden_size = config.hidden_size
+
+ self.attention_dropout = config.attention_dropout
+ self.dropout = config.dropout
+ self.num_attn_heads = num_attn_heads
+ self.head_dim = hidden_size // num_attn_heads
+
+ assert self.head_dim * num_attn_heads == hidden_size, (
+ "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and"
+ " `config.num_decoder_attention_heads`"
+ )
+
+ self.key_proj = nn.Linear(hidden_size, hidden_size)
+ self.value_proj = nn.Linear(hidden_size, hidden_size)
+ self.query_proj = nn.Linear(hidden_size, hidden_size)
+
+ self.out_proj = nn.Linear(hidden_size, hidden_size)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
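+ # (batch_size, seq_len, hidden_size) -> (batch_size, num_attn_heads, seq_len, head_dim)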
+ return tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states,
+ key_value_states: Optional[Tensor] = None,
+ attention_mask: Optional[Tensor] = None,
+ layer_head_mask: Optional[Tensor] = None,
+ past_key_value: Optional[Tuple[Tensor]] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[Tensor, Optional[Tensor]]:
+ batch_size, tgt_len, hidden_size = hidden_states.size()
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ assert list(hidden_states.size()) == [
+ batch_size,
+ tgt_len,
+ hidden_size,
+ ], f"Size of hidden states should be {batch_size, tgt_len, hidden_size}, but is {hidden_states.size()}"
+
+ # previous time steps are cached - no need to recompute key and value if they are static
+ query_states = self.query_proj(hidden_states) / (self.head_dim**0.5)
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.key_proj(key_value_states), -1, batch_size)
+ value_states = self._shape(self.value_proj(key_value_states), -1, batch_size)
+ else:
+ # self_attention
+ key_states = self._shape(self.key_proj(hidden_states), -1, batch_size)
+ value_states = self._shape(self.value_proj(hidden_states), -1, batch_size)
+
+ if is_cross_attention:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ # project states into the correct shape
+ proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+ src_len = key_states.size(2)
+ attn_weights = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3))
+ expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len)
+ if attn_weights.size() != expected_shape:
+ raise ValueError(f"Attention weights should have size {expected_shape}, but is {attn_weights.size()}")
+
+ # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
+ if attention_mask is not None and attention_mask.dim() == 0:
+ attention_mask = None
+
+ expected_shape = (batch_size, self.num_attn_heads, 1, src_len)
+ if attention_mask is not None and attention_mask.size() != expected_shape:
+ raise ValueError(f"Attention mask should have size {expected_shape}, but is {attention_mask.size()}")
+ if attention_mask is not None: # don't attend to padding symbols
+ attn_weights = attn_weights + attention_mask
+ if output_attentions:
+ attn_weights_reshaped = attn_weights
+ else:
+ attn_weights_reshaped = None
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
+ batch_size, self.num_attn_heads, tgt_len, src_len
+ )
+
+ # apply head_mask also on attn_weights_reshaped which is used for n-gram attention inside the model
+ attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped
+
+ attn_probs = nn.functional.dropout(
+ attn_weights,
+ p=self.attention_dropout,
+ training=self.training,
+ )
+ attn_output = torch.einsum("bsij,bsjk->bsik", attn_probs, value_states)
+ expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim)
+ if attn_output.size() != expected_shape:
+ raise ValueError(f"`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}")
+
+ attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size)
+ attn_output = self.out_proj(attn_output)
+
+ attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+class ProphetNetFeedForward(nn.Module):
+ """
+ This is the residual two feed-forward layer block based on the original Transformer implementation.
+ """
+
+ def __init__(self, config: ProphetNetConfig, ffn_dim: int):
+ super().__init__()
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.intermediate = nn.Linear(config.hidden_size, ffn_dim)
+ self.output = nn.Linear(ffn_dim, config.hidden_size)
+ self.activation_dropout = config.activation_dropout
+ self.dropout = config.dropout
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.output(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ return hidden_states
+
+
+class ProphetNetNgramSelfAttention(nn.Module):
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.num_buckets = config.num_buckets
+ self.relative_max_distance = config.relative_max_distance
+ self.num_attn_heads = config.num_decoder_attention_heads
+ self.dropout = config.dropout
+ self.attention_dropout = config.attention_dropout
+ self.head_dim = config.hidden_size // self.num_attn_heads
+ self.ngram = config.ngram
+
+ assert (
+ self.head_dim * self.num_attn_heads == config.hidden_size
+ ), "config.hidden_size must be divisible by num_attn_heads"
+ # key, value, query projection
+ self.key_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.value_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.query_proj = nn.Linear(config.hidden_size, config.hidden_size)
+
+ # out projection
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
+
+ # rel position embeddings
+ self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
+
+ # for onnx runtime
+ self.onnx_trace = False
+
+ def _shape(self, tensor, seq_len, batch_size):
+ return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def prepare_for_onnx_export_(self):
+ self.onnx_trace = True
+
+ def forward(
+ self,
+ hidden_states,
+ past_key_value: Optional[Tuple[Tensor]] = None,
+ attention_mask=None,
+ layer_head_mask=None,
+ extended_predict_attention_mask=None,
+ main_relative_position_buckets=None,
+ predict_relative_position_buckets=None,
+ position_ids=None,
+ ):
+ batch_size, ngram_sequence_length, hidden_size = hidden_states.size()
+ assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], (
+ f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape"
+ f" {hidden_states.shape}"
+ )
+
+ # project
+ query_states = self.query_proj(hidden_states)
+ key_states = self.key_proj(hidden_states)
+ value_states = self.value_proj(hidden_states)
+
+ # normalize
+ query_states = query_states / (self.head_dim**0.5)
+
+ # reshape
+ query_states = self._shape(query_states, ngram_sequence_length, batch_size)
+ key_states = self._shape(key_states, -1, batch_size)
+ value_states = self._shape(value_states, -1, batch_size)
+ proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
+
+ query_states = query_states.view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ # chunk into main stream and predict stream
+ hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1)
+ query_states_list = query_states.chunk(1 + self.ngram, dim=2)
+ key_states_list = key_states.chunk(1 + self.ngram, dim=2)
+ value_states_list = value_states.chunk(1 + self.ngram, dim=2)
+
+ main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
+ main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
+ main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
+ main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
+
+ # saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim)
+ if past_key_value is not None:
+ prev_main_key_states = past_key_value[0]
+ main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=2)
+ prev_main_value_states = past_key_value[1]
+ main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=2)
+
+ # Update cache
+ past_key_value = (main_key_states, main_value_states)
+
+ # get seq_length of main stream only
+ sequence_length = ngram_sequence_length // (1 + self.ngram)
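+ # e.g. with ngram=2 the incoming hidden_states are the concatenation
+ # [main | stream_1 | stream_2] along the time axis, so the main stream covers
+ # one third of ngram_sequence_length.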
+
+ # MAIN-STREAM
+ # main attn weights
+ # [batch_size, number_heads, sequence_length, head_dimension]
+ # x [batch_size, number_heads, head_dimension, sequence_length]
+ # -> [batch_size, number_heads, sequence_length, sequence_length]
+ main_attn_weights = torch.einsum("bntc,bncs->bnts", main_query_states, main_key_states.transpose(2, 3))
+
+ # retrieve relative position embeddings for each layer -> see paper for more details
+ main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
+ main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
+ )
+
+ main_attn_weights = main_attn_weights + main_relative_pos_embeddings
+
+ if attention_mask is not None:
+ main_attn_weights = main_attn_weights + attention_mask
+
+ main_attn_probs = softmax(
+ main_attn_weights,
+ dim=-1,
+ onnx_trace=self.onnx_trace,
+ ).type_as(main_attn_weights)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view(
+ batch_size, self.num_attn_heads, -1, sequence_length
+ )
+
+ main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
+ # project to attn_output
+ # [batch_size, number_heads, sequence_length, sequence_length]
+ # x [batch_size, number_heads, sequence_length, head_dimension]
+ # -> [batch_size, number_heads, sequence_length, head_dimension]
+ main_attn_output = torch.einsum("bntc,bncs->bnts", main_attn_probs, main_value_states)
+ # reshape so that num_heads dim is merged into last `head_dim` axis
+ main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size)
+ main_attn_output = self.out_proj(main_attn_output)
+
+ # PREDICT-STREAM
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ predict_query_states = torch.stack(predict_query_states_list, 1).view(
+ batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim
+ )
+
+ # [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1)
+
+ # [batch_size, sequence_length, ngram, hidden_size]
+ predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2)
+
+ # [batch_size, number_heads, ngram, 2*sequence_length, head_dimension]
+ predict_value_states = torch.cat(
+ [torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2
+ )
+
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ # -> [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ predict_attn_weights = torch.einsum("bnhtc,bnhsc->bnhts", (predict_query_states, predict_key_states))
+
+ # retrieve relative position embeddings for each layer -> see paper for more details
+ # [batch_size, ngram, number_heads, sequence_length, predict_relative_pos_embeddings]
+ predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
+ predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
+ )
+
+ # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
+
+ if extended_predict_attention_mask is not None:
+ # Permuting Predict attention mask to [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4)
+ extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype)
+ predict_attn_weights = predict_attn_weights + extended_predict_attention_mask
+
+ predict_attn_probs = softmax(
+ predict_attn_weights,
+ dim=-1,
+ onnx_trace=self.onnx_trace,
+ ).type_as(predict_attn_weights)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs
+
+ predict_attn_probs = nn.functional.dropout(
+ predict_attn_probs, p=self.attention_dropout, training=self.training
+ )
+ # project to attention output
+ # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ # -> [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ predict_attn_output = torch.einsum(
+ "bnhts,bnhsc->bnhtc", (predict_attn_probs, predict_value_states.transpose(1, 2))
+ )
+
+ # reshape so that num_heads dim is merged into last `head_dim` axis
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension] -> [batch_size, ngram, sequence_length, hidden_size]
+ predict_attn_output = predict_attn_output.transpose(2, 3)
+ predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size)
+ predict_attn_output = self.out_proj(predict_attn_output)
+
+ # concat to single attn output
+ # [batch_size, (1+ngram)*sequence_length, hidden_size]
+ attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size)
+ # reshape into better form for `config.output_attentions`
+ main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1)
+
+ attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
+
+ return attn_output, main_attn_probs, predict_attn_probs, past_key_value
+
+ def get_main_relative_pos_embeddings(
+ self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
+ ):
+ # input hidden_states [batch_size, sequence_length, hidden_size]
+ # input attn_weights [batch_size, num_heads, sequence_length, sequence_length]
+ # input position_ids [batch_size, sequence_length] or [1,1]
+ batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape
+ attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len)
+ if main_relative_position_buckets is None:
+ batch_size, sequence_length = hidden_states.shape[:2]
+ relative_positions = (
+ torch.arange(1, attn_weights.shape[-1] + 1)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(batch_size, sequence_length, 1)
+ .to(position_ids.device)
+ )
+ # [batch_size, sequence_length, sequence_length+1]
+ relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
+ main_relative_position_buckets = compute_relative_buckets(
+ self.num_buckets, self.relative_max_distance, relative_positions, False
+ )
+
+ # [batch_size, sequence_length, num_buckets * num_heads]
+ rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
+ rel_pos_embeddings = rel_pos_embeddings.view(
+ rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
+ )
+ rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2)
+ # [batch_size, num_heads, sequence_length, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,))
+
+ main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
+ # [batch_size * num_heads * sequence_length, sequence_length]
+ main_relative_position_buckets = main_relative_position_buckets.view(
+ -1, main_relative_position_buckets.shape[-1]
+ )
+ main_relative_position_buckets = main_relative_position_buckets.long()
+ # [batch_size * num_heads * sequence_length, sequence_length]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
+
+ main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets)
+ main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1)
+ return main_relative_pos_embeddings
+
+ def get_predict_relative_pos_embeddings(
+ self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
+ ):
+ # input hidden_states [batch_size, sequence_length, ngram, hidden_size]
+ # input attn_weights [batch_size, ngram, num_heads, sequence_length, 2*sequence_length]
+ # input position_ids [batch_size, sequence_length] or [1,1]
+ # input predict_relative_position_buckets [batch_size, sequence_length, 2*sequence_length] or None
+ batch_size, sequence_length = hidden_states.shape[0:2]
+
+ if predict_relative_position_buckets is None:
+ key_sequence_length = attn_weights.shape[-1]
+ assert (
+ position_ids[0][0] == key_sequence_length - 1
+ ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
+ relative_positions = (
+ torch.arange(0, key_sequence_length)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(batch_size, sequence_length, 1)
+ .to(position_ids.device)
+ )
+
+ relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
+ predict_relative_position_buckets = compute_relative_buckets(
+ self.num_buckets, self.relative_max_distance, relative_positions, False
+ )
+
+ # [batch_size, ngram, sequence_length, hidden_size]
+ hidden_states = hidden_states.transpose(1, 2)
+ rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
+
+ # [batch_size, ngram, sequence_length, num_buckets, num_heads]
+ rel_pos_embeddings = rel_pos_embeddings.view(
+ hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
+ )
+ rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3)
+ # [batch_size * ngram * sequence_length * num_heads, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets)
+ # [ngram, batch_size, num_heads * sequence_length, -1]
+ predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0)
+ predict_relative_position_buckets = predict_relative_position_buckets.repeat(
+ self.ngram, 1, self.num_attn_heads, 1
+ )
+ # [ngram * batch_size * num_heads * sequence_length, -1]
+ predict_relative_position_buckets = predict_relative_position_buckets.view(
+ -1, predict_relative_position_buckets.size(-1)
+ ).long()
+
+ predict_relative_pos_embeddings = torch.gather(
+ rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
+ )
+
+ # [batch_size, ngram, num_heads, sequence_length, -1]
+ predict_relative_pos_embeddings = predict_relative_pos_embeddings.view(
+ batch_size, self.ngram, self.num_attn_heads, sequence_length, -1
+ )
+
+ return predict_relative_pos_embeddings
+
+
+class ProphetNetEncoderLayer(nn.Module):
+ """
+ Encoder block for ProphetNet
+ """
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__()
+ # 1st residual block
+ self.self_attn = ProphetNetAttention(config, config.num_encoder_attention_heads)
+ self.self_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 2nd residual block
+ self.feed_forward = ProphetNetFeedForward(config, config.encoder_ffn_dim)
+ self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ output_attentions: bool = False,
+ ):
+ # 1st residual block
+ attention_output, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_layer_norm(attention_output + hidden_states)
+
+ # 2nd residual block
+ feed_forward_output = self.feed_forward(hidden_states)
+ hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class ProphetNetDecoderLayer(nn.Module):
+ """
+ Decoder block for ProphetNet
+ """
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__()
+ # 1st residual block
+ self.self_attn = ProphetNetNgramSelfAttention(config)
+ self.self_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 2nd residual block
+ if config.add_cross_attention:
+ self.cross_attn = ProphetNetAttention(config, config.num_decoder_attention_heads)
+ self.cross_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 3rd residual block
+ self.feed_forward = ProphetNetFeedForward(config, config.decoder_ffn_dim)
+ self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attn_mask=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ extended_predict_attention_mask=None,
+ main_relative_position_buckets=None,
+ predict_relative_position_buckets=None,
+ position_ids=None,
+ past_key_value=None,
+ use_cache: bool = True,
+ output_attentions: bool = False,
+ ):
+ # 1st residual block
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ extended_predict_attention_mask=extended_predict_attention_mask,
+ main_relative_position_buckets=main_relative_position_buckets,
+ predict_relative_position_buckets=predict_relative_position_buckets,
+ position_ids=position_ids,
+ )
+ hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ # 2nd residual block
+ attention_output, cross_attn_weights, cross_attn_present_key_value = self.cross_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attn_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # 3rd residual block
+ feed_forward_output = self.feed_forward(hidden_states)
+ hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+@add_start_docstrings(
+ "The standalone encoder part of the ProphetNetModel.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetEncoder(ProphetNetPreTrainedModel):
+ r"""
+ word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
+ The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word
+ embeddings instead of randomly initialized word embeddings.
+ """
+
+ def __init__(self, config: ProphetNetConfig, word_embeddings: nn.Embedding = None):
+ super().__init__(config)
+
+ self.word_embeddings = (
+ word_embeddings
+ if word_embeddings is not None
+ else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ )
+ self.position_embeddings = ProphetNetPositionalEmbeddings(config)
+ self.embeddings_layer_norm = LayerNorm(config.hidden_size)
+
+ self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetEncoder
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetEncoder.from_pretrained("patrickvonplaten/prophetnet-large-uncased-standalone")
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Either input_ids or inputs_embeds has to be passed.")
+ elif input_ids is not None and inputs_embeds is not None:
+ raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
+ elif input_ids is not None and inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ # prepare attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (
+ 1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1)
+ ) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype)
+ else:
+ extended_attention_mask = None
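+ # e.g. a padding-mask row [1, 1, 0] becomes additive biases [0, 0, dtype_min]
+ # repeated over the attention heads, so padded positions are suppressed in the
+ # softmax of every encoder layer.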
+
+ position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device)
+
+ hidden_states = inputs_embeds + position_embeddings
+ hidden_states = self.embeddings_layer_norm(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training)
+
+ encoder_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layers)
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_hidden_states = encoder_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ extended_attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_hidden_states = encoder_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions
+ )
+
+
+@add_start_docstrings(
+ "The standalone decoder part of the ProphetNetModel.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetDecoder(ProphetNetPreTrainedModel):
+ r"""
+ word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
+ The word embedding parameters. This can be used to initialize [`ProphetNetDecoder`] with pre-defined word
+ embeddings instead of randomly initialized word embeddings.
+ """
+
+ def __init__(self, config: ProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.ngram = config.ngram
+ self.num_buckets = config.num_buckets
+ self.relative_max_distance = config.relative_max_distance
+ self.dropout = config.dropout
+ self.max_target_positions = config.max_position_embeddings
+
+ self.word_embeddings = (
+ word_embeddings
+ if word_embeddings is not None
+ else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ )
+ self.position_embeddings = ProphetNetPositionalEmbeddings(config)
+
+ self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None)
+ self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)])
+ self.embeddings_layer_norm = LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ProphetNetDecoderModelOutput]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetDecoder
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetDecoder.from_pretrained("microsoft/prophetnet-large-uncased", add_cross_attention=False)
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
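+
+ >>> # a hedged sketch (not part of the original example): with `use_cache=True` the decoder also
+ >>> # returns `past_key_values`, which can be passed back in to decode one token at a time
+ >>> cached = model(**inputs, use_cache=True)
+ >>> past_key_values = cached.past_key_values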
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.")
+ elif input_ids is not None and inputs_embeds is not None:
+ raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.")
+ elif input_ids is not None and inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ main_stream_pos_embed, position_ids = self.position_embeddings(
+ (batch_size, sequence_length),
+ device=inputs_embeds.device,
+ past_key_values=past_key_values,
+ )
+
+ if past_key_values is not None:
+ main_relative_position_buckets, predict_relative_position_buckets = None, None
+ else:
+ (
+ main_relative_position_buckets,
+ predict_relative_position_buckets,
+ ) = self.compute_buffered_relative_buckets(position_ids)
+ predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
+
+ # add position embeddings
+ hidden_states = inputs_embeds + main_stream_pos_embed
+
+ ngram_embeddings = self.ngram_embeddings.weight
+
+ # prepare attention mask
+ if past_key_values is not None:
+ assert (
+ hidden_states.size(1) == 1
+ ), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1"
+
+ ngram_hidden_states = [
+ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1)
+ for ngram in range(self.ngram)
+ ]
+ extended_attention_mask = None
+ extended_predict_attention_mask = None
+ else:
+ ngram_hidden_states = [
+ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram)
+ ]
+ extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask)
+ extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask)
+
+ # prepare encoder attention mask
+ if encoder_attention_mask is not None:
+ extended_encoder_attention_mask = (
+ 1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1)
+ ) * torch.finfo(self.dtype).min
+ extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype)
+ else:
+ extended_encoder_attention_mask = None
+
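+ # The main stream and the `ngram` predicting streams are concatenated along the sequence
+ # dimension here and split back at `sequence_length` when the outputs are assembled below.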
+ hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1)
+
+ if self.embeddings_layer_norm:
+ hidden_states = self.embeddings_layer_norm(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # init attentions, hidden_states and cache with empty tuples
+ all_main_stream_hidden_states = () if output_hidden_states else None
+ all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None
+
+ all_main_stream_attns = () if output_attentions else None
+ all_ngram_stream_attns = () if output_attentions else None
+ all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ present_key_values = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ assert attn_mask.size()[0] == (len(self.layers)), (
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ # grad cannot be kept because tensor is sliced
+ all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
+ if self.config.ngram > 0:
+ all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ extended_attention_mask,
+ encoder_hidden_states,
+ extended_encoder_attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ (cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
+ extended_predict_attention_mask,
+ main_relative_position_buckets,
+ predict_relative_position_buckets,
+ position_ids,
+ None,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attn_mask=extended_encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ extended_predict_attention_mask=extended_predict_attention_mask,
+ main_relative_position_buckets=main_relative_position_buckets,
+ predict_relative_position_buckets=predict_relative_position_buckets,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ present_key_values += (layer_outputs[4 if output_attentions else 1],)
+
+ if output_attentions:
+ all_main_stream_attns += (layer_outputs[1],)
+ all_ngram_stream_attns += (layer_outputs[2],)
+
+ if self.config.add_cross_attention:
+ all_cross_attns += (layer_outputs[3],)
+
+ if output_hidden_states:
+ all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
+ if self.config.ngram > 0:
+ all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
+
+ # split last_hidden_state for return
+ last_hidden_state = hidden_states[:, :sequence_length]
+ last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ last_hidden_state,
+ last_hidden_state_ngram,
+ present_key_values,
+ all_main_stream_hidden_states,
+ all_ngram_stream_hidden_states,
+ all_main_stream_attns,
+ all_ngram_stream_attns,
+ all_cross_attns,
+ ]
+ if v is not None
+ )
+ return ProphetNetDecoderModelOutput(
+ last_hidden_state=last_hidden_state,
+ last_hidden_state_ngram=last_hidden_state_ngram,
+ past_key_values=present_key_values,
+ hidden_states=all_main_stream_hidden_states,
+ hidden_states_ngram=all_ngram_stream_hidden_states,
+ attentions=all_main_stream_attns,
+ ngram_attentions=all_ngram_stream_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+ def compute_buffered_relative_buckets(self, position_ids):
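+ # Relative position buckets are precomputed over the full `max_target_positions` range and then
+ # sliced ("buffered") down to the current sequence length for the main and predicting streams.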
+ batch_size, sequence_length = position_ids.shape
+
+ position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
+ main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
+ self.num_buckets, self.relative_max_distance, position_ids
+ )
+
+ # buffer relative buckets
+ main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1)
+ predict_relative_buckets = torch.cat(
+ [
+ predict_relative_buckets[:, :sequence_length, :sequence_length],
+ predict_relative_buckets[
+ :, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length
+ ],
+ ],
+ 2,
+ ).repeat(batch_size, 1, 1)
+
+ return main_relative_buckets, predict_relative_buckets
+
+ def prepare_attention_mask(self, hidden_states, attention_mask):
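+ # Build an additive attention mask: disallowed positions are set to the dtype's minimum value
+ # (effectively -inf before the softmax); the causal mask is combined with the padding mask, if any.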
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # get causal mask
+ causal_mask = torch.full(
+ (seq_length, seq_length),
+ torch.finfo(hidden_states.dtype).min,
+ dtype=hidden_states.dtype,
+ device=hidden_states.device,
+ )
+ causal_mask = torch.triu(causal_mask, 1)
+
+ extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand(
+ (batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape
+ )
+
+ # add usual attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_causal_mask + extended_attention_mask
+ else:
+ extended_attention_mask = extended_causal_mask
+ return extended_attention_mask.to(hidden_states.dtype)
+
+ def prepare_predict_attention_mask(self, hidden_states, attention_mask):
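+ # Same idea as above, but for the predicting streams: the mask gains an extra `ngram` dimension
+ # and covers 2 * seq_length key positions (main stream first, then the corresponding predict stream).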
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # get causal mask
+ predict_causal_mask = ngram_attention_bias(
+ self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype
+ )
+ predict_causal_mask = torch.cat(
+ [
+ predict_causal_mask[:, :seq_length, :seq_length],
+ predict_causal_mask[
+ :, :seq_length, self.max_target_positions : self.max_target_positions + seq_length
+ ],
+ ],
+ dim=-1,
+ )
+ extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand(
+ (batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape
+ )
+
+ # add usual attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_attention_mask.expand(
+ (batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length)
+ )
+ # predicted stream attention_mask should always be 0
+ extended_attention_mask = torch.cat(
+ [extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
+ )
+ extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
+ else:
+ extended_predict_attention_mask = extended_predict_causal_mask
+ return extended_predict_attention_mask.to(hidden_states.dtype)
+
+
+@add_start_docstrings(
+ "The bare ProphetNet Model outputting raw hidden-states without any specific head on top.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetModel(ProphetNetPreTrainedModel):
+ _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight"]
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__(config)
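+ # A single embedding matrix is created here and passed to both the encoder and the decoder, so
+ # the two stacks share their input embeddings (see also `_tie_weights`).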
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_encoder_decoder = False
+ encoder_config.use_cache = False
+ self.encoder = ProphetNetEncoder(encoder_config, self.word_embeddings)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+ self.encoder.word_embeddings = self.word_embeddings
+ self.decoder.word_embeddings = self.word_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings)
+ self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ProphetNetSeq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetModel.from_pretrained("microsoft/prophetnet-large-uncased")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states
+ >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+ return ProphetNetSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram,
+ decoder_attentions=decoder_outputs.attentions,
+ decoder_ngram_attentions=decoder_outputs.ngram_attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel, GenerationMixin):
+ _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight", "lm_head.weight"]
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__(config)
+ self.prophetnet = ProphetNetModel(config)
+ self.padding_idx = config.pad_token_id
+ self.disable_ngram_loss = config.disable_ngram_loss
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head)
+
+ def get_input_embeddings(self):
+ return self.prophetnet.word_embeddings
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ProphetNetSeq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
+ labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> logits_next_token = outputs.logits # logits to predict next token as usual
+ >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+ # get decoder inputs from shifting lm labels to the right
+ decoder_input_ids = self._shift_right(labels)
+
+ outputs = self.prophetnet(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ batch_size, sequence_length = (
+ decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2]
+ )
+
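+ # outputs[1] is `last_hidden_state_ngram`, with the predicting streams stacked along the sequence
+ # dimension; reshape to (batch, ngram, seq_len, hidden) so that stream 0 yields the usual
+ # next-token logits and the remaining streams the 2nd, 3rd, ... next-token logits.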
+ predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
+ predict_logits = self.lm_head(predicting_streams)
+
+ logits = predict_logits[:, 0]
+ logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
+
+ # To use .view in loss computation, make sure that logits is contiguous.
+ if not logits.is_contiguous():
+ logits = logits.contiguous()
+
+ loss = None
+ if labels is not None:
+ loss = self._compute_loss(predict_logits, labels)
+
+ if not return_dict:
+ all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
+ return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
+ else:
+ return ProphetNetSeq2SeqLMOutput(
+ loss=loss,
+ logits=logits,
+ logits_ngram=logits_ngram,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ decoder_ngram_attentions=outputs.decoder_ngram_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def _compute_loss(self, logits, labels, ignore_index=-100):
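+ # The same labels are replicated across all predicting streams (unless `disable_ngram_loss` is
+ # set), and label smoothing with `config.eps` is mixed into the standard NLL loss.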
+ expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
+
+ for i in range(self.config.ngram):
+ if i > 0 and self.disable_ngram_loss:
+ break
+ expend_targets[i, :, :] = labels
+
+ logits = logits.transpose(0, 1).contiguous()
+ lprobs = nn.functional.log_softmax(
+ logits.view(-1, logits.size(-1)),
+ dim=-1,
+ dtype=torch.float32,
+ )
+
+ loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
+
+ if self.config.eps > 0.0:
+ smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
+ non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
+ smooth_loss = smooth_loss[non_masked_tokens]
+ smooth_loss = smooth_loss.mean()
+
+ eps_i = self.config.eps / lprobs.size(-1)
+ loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
+
+ return loss
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
+
+ @staticmethod
+ # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
+
+ def get_encoder(self):
+ return self.prophetnet.encoder
+
+ def get_decoder(self):
+ return self.prophetnet.decoder
+
+
+@add_start_docstrings(
+ "The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal"
+ " language modeling.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetForCausalLM(ProphetNetPreTrainedModel, GenerationMixin):
+ _tied_weights_keys = [
+ "prophetnet.word_embeddings.weight",
+ "prophetnet.decoder.word_embeddings.weight",
+ "lm_head.weight",
+ ]
+
+ def __init__(self, config: ProphetNetConfig):
+ # set config for CLM
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.prophetnet = ProphetNetDecoderWrapper(config)
+
+ self.padding_idx = config.pad_token_id
+ self.disable_ngram_loss = config.disable_ngram_loss
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.prophetnet.decoder.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.prophetnet.decoder.word_embeddings = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head)
+
+ def set_decoder(self, decoder):
+ self.prophetnet.decoder = decoder
+
+ def get_decoder(self):
+ return self.prophetnet.decoder
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ProphetNetDecoderLMOutput]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+ `[-100, 0, ..., config.vocab_size - 1]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetForCausalLM
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetForCausalLM.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+
+ >>> # Model can also be used with EncoderDecoder framework
+ >>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer
+ >>> import torch
+
+ >>> tokenizer_enc = BertTokenizer.from_pretrained("google-bert/bert-large-uncased")
+ >>> tokenizer_dec = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
+ ... "google-bert/bert-large-uncased", "microsoft/prophetnet-large-uncased"
+ ... )
+
+ >>> ARTICLE = (
+ ... "the us state department said wednesday it had received no "
+ ... "formal word from bolivia that it was expelling the us ambassador there "
+ ... "but said the charges made against him are `` baseless ."
+ ... )
+ >>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids
+ >>> labels = tokenizer_dec(
+ ... "us rejects charges against its ambassador in bolivia", return_tensors="pt"
+ ... ).input_ids
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:])
+
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
+ outputs = self.prophetnet.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2]
+
+ predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
+ predict_logits = self.lm_head(predicting_streams)
+
+ logits = predict_logits[:, 0]
+ logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
+
+ loss = None
+ if labels is not None:
+ loss = self._compute_loss(predict_logits, labels)
+
+ if not return_dict:
+ all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
+ return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
+ else:
+ return ProphetNetDecoderLMOutput(
+ loss=loss,
+ logits=logits,
+ logits_ngram=logits_ngram,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ hidden_states_ngram=outputs.hidden_states_ngram,
+ attentions=outputs.attentions,
+ ngram_attentions=outputs.ngram_attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def _compute_loss(self, logits, labels, ignore_index=-100):
+ expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
+
+ for i in range(self.config.ngram):
+ if i > 0 and self.disable_ngram_loss:
+ break
+ expend_targets[i, :, :] = labels
+
+ logits = logits.transpose(0, 1).contiguous()
+ lprobs = nn.functional.log_softmax(
+ logits.view(-1, logits.size(-1)),
+ dim=-1,
+ dtype=torch.float32,
+ )
+
+ loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
+
+ if self.config.eps > 0.0:
+ smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
+ non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
+ smooth_loss = smooth_loss[non_masked_tokens]
+ smooth_loss = smooth_loss.mean()
+
+ eps_i = self.config.eps / lprobs.size(-1)
+ loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
+
+ return loss
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ use_cache=None,
+ **kwargs,
+ ):
+ # Overwritten -- our tests complain if we use GenerationMixin.prepare_inputs_for_generation
+
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
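+ # once a cache exists, only the last token has to be embedded again; earlier positions are
+ # already covered by `past_key_values`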
+ if past_key_values:
+ input_ids = input_ids[:, -1:]
+ # first step, decoder_cached_states are empty
+ return {
+ "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel):
+ """
+ This is a wrapper class, so that [`ProphetNetForCausalLM`] can correctly be loaded from pretrained prophetnet
+ classes.
+ """
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__(config)
+
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.decoder = ProphetNetDecoder(config, word_embeddings=self.word_embeddings)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _tie_weights(self):
+ self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings())
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
+
+
+__all__ = [
+ "ProphetNetDecoder",
+ "ProphetNetEncoder",
+ "ProphetNetForCausalLM",
+ "ProphetNetForConditionalGeneration",
+ "ProphetNetModel",
+ "ProphetNetPreTrainedModel",
+]
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/tokenization_prophetnet.py b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/tokenization_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..276dbb8438f7fa1ee9680f6f0e357039deac5734
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/models/prophetnet/tokenization_prophetnet.py
@@ -0,0 +1,507 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import os
+import unicodedata
+from typing import Iterable, List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer:
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*):
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer:
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
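+ # greedy longest-match-first: repeatedly take the longest substring starting at `start` that is
+ # in the vocabulary (prefixed with "##" when not at the start of the word)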
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
+
+
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+class ProphetNetTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a ProphetNetTokenizer. Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ x_sep_token (`str`, *optional*, defaults to `"[X_SEP]"`):
+ Special second separator token, which can be generated by [`ProphetNetForConditionalGeneration`]. It is
+ used to separate bullet-point-like sentences in summarization.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
+ Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
+ extra spaces.
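+
+ Example (a minimal usage sketch):
+
+ ```python
+ >>> from transformers import ProphetNetTokenizer
+
+ >>> tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ ```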
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ # first name has to correspond to main model input name
+ # to make sure `tokenizer.pad(...)` works correctly
+ # `ProphetNet` doesn't have `token_type_ids` as argument.
+ model_input_names: List[str] = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file: str,
+ do_lower_case: Optional[bool] = True,
+ do_basic_tokenize: Optional[bool] = True,
+ never_split: Optional[Iterable] = None,
+ unk_token: Optional[str] = "[UNK]",
+ sep_token: Optional[str] = "[SEP]",
+ x_sep_token: Optional[str] = "[X_SEP]",
+ pad_token: Optional[str] = "[PAD]",
+ mask_token: Optional[str] = "[MASK]",
+ tokenize_chinese_chars: Optional[bool] = True,
+ strip_accents: Optional[bool] = None,
+ clean_up_tokenization_spaces: bool = True,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ x_sep_token=x_sep_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def _tokenize(self, text):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ def _convert_token_to_id(self, token: str):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index: int):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens: List[str]):
+ """Converts a sequence of tokens (strings) into a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ def get_special_tokens_mask(
+ self,
+ token_ids_0: List[int],
+ token_ids_1: Optional[List[int]] = None,
+ already_has_special_tokens: Optional[bool] = False,
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return ([0] * len(token_ids_0)) + [1]
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ProphetNet
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0]
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+ and adding special tokens. A ProphetNet sequence has the following format:
+
+ - single sequence: `X [SEP]`
+ - pair of sequences: `A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return token_ids_0 + [self.sep_token_id]
+ sep = [self.sep_token_id]
+ return token_ids_0 + sep + token_ids_1 + sep
+
+
+__all__ = ["ProphetNetTokenizer"]