ZTWHHH commited on
Commit
013cf23
·
verified ·
1 Parent(s): 0ed4a83

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. vlmpy310/lib/python3.10/site-packages/transformers/models/bert/__pycache__/configuration_bert.cpython-310.pyc +0 -0
  2. vlmpy310/lib/python3.10/site-packages/transformers/models/camembert/__init__.py +30 -0
  3. vlmpy310/lib/python3.10/site-packages/transformers/models/camembert/modeling_camembert.py +1712 -0
  4. vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py +27 -0
  5. vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc +0 -0
  6. vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc +0 -0
  7. vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc +0 -0
  8. vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/configuration_decision_transformer.py +157 -0
  9. vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py +963 -0
  10. vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/__init__.py +27 -0
  11. vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc +0 -0
  12. vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc +0 -0
  13. vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc +0 -0
  14. vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py +163 -0
  15. vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py +1815 -0
  16. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__init__.py +30 -0
  17. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/__init__.cpython-310.pyc +0 -0
  18. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/configuration_longformer.cpython-310.pyc +0 -0
  19. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/convert_longformer_original_pytorch_lightning_to_pytorch.cpython-310.pyc +0 -0
  20. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc +0 -0
  21. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_tf_longformer.cpython-310.pyc +0 -0
  22. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer.cpython-310.pyc +0 -0
  23. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer_fast.cpython-310.pyc +0 -0
  24. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/configuration_longformer.py +204 -0
  25. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py +85 -0
  26. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/modeling_longformer.py +0 -0
  27. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/modeling_tf_longformer.py +0 -0
  28. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer.py +402 -0
  29. vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer_fast.py +265 -0
  30. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py +28 -0
  31. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc +0 -0
  32. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc +0 -0
  33. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/convert_musicgen_transformers.cpython-310.pyc +0 -0
  34. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc +0 -0
  35. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc +0 -0
  36. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/configuration_musicgen.py +247 -0
  37. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/convert_musicgen_transformers.py +236 -0
  38. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/modeling_musicgen.py +0 -0
  39. vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/processing_musicgen.py +144 -0
  40. vlmpy310/lib/python3.10/site-packages/transformers/models/paligemma/__init__.py +28 -0
  41. vlmpy310/lib/python3.10/site-packages/transformers/models/paligemma/configuration_paligemma.py +150 -0
  42. vlmpy310/lib/python3.10/site-packages/transformers/models/paligemma/modeling_paligemma.py +618 -0
  43. vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__init__.py +27 -0
  44. vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/__init__.cpython-310.pyc +0 -0
  45. vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/configuration_starcoder2.cpython-310.pyc +0 -0
  46. vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modeling_starcoder2.cpython-310.pyc +0 -0
  47. vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modular_starcoder2.cpython-310.pyc +0 -0
  48. vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/configuration_starcoder2.py +202 -0
  49. vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/modeling_starcoder2.py +1063 -0
  50. vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/modular_starcoder2.py +274 -0
vlmpy310/lib/python3.10/site-packages/transformers/models/bert/__pycache__/configuration_bert.cpython-310.pyc ADDED
Binary file (6.49 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/camembert/__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_camembert import *
22
+ from .modeling_camembert import *
23
+ from .modeling_tf_camembert import *
24
+ from .tokenization_camembert import *
25
+ from .tokenization_camembert_fast import *
26
+ else:
27
+ import sys
28
+
29
+ _file = globals()["__file__"]
30
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
vlmpy310/lib/python3.10/site-packages/transformers/models/camembert/modeling_camembert.py ADDED
@@ -0,0 +1,1712 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019 Inria, Facebook AI Research and the HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch CamemBERT model."""
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from packaging import version
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN, gelu
28
+ from ...generation import GenerationMixin
29
+ from ...modeling_attn_mask_utils import (
30
+ _prepare_4d_attention_mask_for_sdpa,
31
+ _prepare_4d_causal_attention_mask_for_sdpa,
32
+ )
33
+ from ...modeling_outputs import (
34
+ BaseModelOutputWithPastAndCrossAttentions,
35
+ BaseModelOutputWithPoolingAndCrossAttentions,
36
+ CausalLMOutputWithCrossAttentions,
37
+ MaskedLMOutput,
38
+ MultipleChoiceModelOutput,
39
+ QuestionAnsweringModelOutput,
40
+ SequenceClassifierOutput,
41
+ TokenClassifierOutput,
42
+ )
43
+ from ...modeling_utils import PreTrainedModel
44
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
45
+ from ...utils import (
46
+ add_code_sample_docstrings,
47
+ add_start_docstrings,
48
+ add_start_docstrings_to_model_forward,
49
+ get_torch_version,
50
+ logging,
51
+ replace_return_docstrings,
52
+ )
53
+ from .configuration_camembert import CamembertConfig
54
+
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+ _CHECKPOINT_FOR_DOC = "almanach/camembert-base"
59
+ _CONFIG_FOR_DOC = "CamembertConfig"
60
+
61
+
62
+ CAMEMBERT_START_DOCSTRING = r"""
63
+
64
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
65
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
66
+ etc.)
67
+
68
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
69
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
70
+ and behavior.
71
+
72
+ Parameters:
73
+ config ([`CamembertConfig`]): Model configuration class with all the parameters of the
74
+ model. Initializing with a config file does not load the weights associated with the model, only the
75
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
76
+ """
77
+
78
+
79
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Camembert
80
+ class CamembertEmbeddings(nn.Module):
81
+ """
82
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
83
+ """
84
+
85
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
86
+ def __init__(self, config):
87
+ super().__init__()
88
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
89
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
90
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
91
+
92
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
93
+ # any TensorFlow checkpoint file
94
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
95
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
96
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
97
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
98
+ self.register_buffer(
99
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
100
+ )
101
+ self.register_buffer(
102
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
103
+ )
104
+
105
+ # End copy
106
+ self.padding_idx = config.pad_token_id
107
+ self.position_embeddings = nn.Embedding(
108
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
109
+ )
110
+
111
+ def forward(
112
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
113
+ ):
114
+ if position_ids is None:
115
+ if input_ids is not None:
116
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
117
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
118
+ else:
119
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
120
+
121
+ if input_ids is not None:
122
+ input_shape = input_ids.size()
123
+ else:
124
+ input_shape = inputs_embeds.size()[:-1]
125
+
126
+ seq_length = input_shape[1]
127
+
128
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
129
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
130
+ # issue #5664
131
+ if token_type_ids is None:
132
+ if hasattr(self, "token_type_ids"):
133
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
134
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
135
+ token_type_ids = buffered_token_type_ids_expanded
136
+ else:
137
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
138
+
139
+ if inputs_embeds is None:
140
+ inputs_embeds = self.word_embeddings(input_ids)
141
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
142
+
143
+ embeddings = inputs_embeds + token_type_embeddings
144
+ if self.position_embedding_type == "absolute":
145
+ position_embeddings = self.position_embeddings(position_ids)
146
+ embeddings += position_embeddings
147
+ embeddings = self.LayerNorm(embeddings)
148
+ embeddings = self.dropout(embeddings)
149
+ return embeddings
150
+
151
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
152
+ """
153
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
154
+
155
+ Args:
156
+ inputs_embeds: torch.Tensor
157
+
158
+ Returns: torch.Tensor
159
+ """
160
+ input_shape = inputs_embeds.size()[:-1]
161
+ sequence_length = input_shape[1]
162
+
163
+ position_ids = torch.arange(
164
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
165
+ )
166
+ return position_ids.unsqueeze(0).expand(input_shape)
167
+
168
+
169
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->Camembert
170
+ class CamembertSelfAttention(nn.Module):
171
+ def __init__(self, config, position_embedding_type=None):
172
+ super().__init__()
173
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
174
+ raise ValueError(
175
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
176
+ f"heads ({config.num_attention_heads})"
177
+ )
178
+
179
+ self.num_attention_heads = config.num_attention_heads
180
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
181
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
182
+
183
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
184
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
185
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
186
+
187
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
188
+ self.position_embedding_type = position_embedding_type or getattr(
189
+ config, "position_embedding_type", "absolute"
190
+ )
191
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
192
+ self.max_position_embeddings = config.max_position_embeddings
193
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
194
+
195
+ self.is_decoder = config.is_decoder
196
+
197
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
198
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
199
+ x = x.view(new_x_shape)
200
+ return x.permute(0, 2, 1, 3)
201
+
202
+ def forward(
203
+ self,
204
+ hidden_states: torch.Tensor,
205
+ attention_mask: Optional[torch.FloatTensor] = None,
206
+ head_mask: Optional[torch.FloatTensor] = None,
207
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
208
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
209
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
210
+ output_attentions: Optional[bool] = False,
211
+ ) -> Tuple[torch.Tensor]:
212
+ mixed_query_layer = self.query(hidden_states)
213
+
214
+ # If this is instantiated as a cross-attention module, the keys
215
+ # and values come from an encoder; the attention mask needs to be
216
+ # such that the encoder's padding tokens are not attended to.
217
+ is_cross_attention = encoder_hidden_states is not None
218
+
219
+ if is_cross_attention and past_key_value is not None:
220
+ # reuse k,v, cross_attentions
221
+ key_layer = past_key_value[0]
222
+ value_layer = past_key_value[1]
223
+ attention_mask = encoder_attention_mask
224
+ elif is_cross_attention:
225
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
226
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
227
+ attention_mask = encoder_attention_mask
228
+ elif past_key_value is not None:
229
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
230
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
231
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
232
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
233
+ else:
234
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
235
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
236
+
237
+ query_layer = self.transpose_for_scores(mixed_query_layer)
238
+
239
+ use_cache = past_key_value is not None
240
+ if self.is_decoder:
241
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
242
+ # Further calls to cross_attention layer can then reuse all cross-attention
243
+ # key/value_states (first "if" case)
244
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
245
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
246
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
247
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
248
+ past_key_value = (key_layer, value_layer)
249
+
250
+ # Take the dot product between "query" and "key" to get the raw attention scores.
251
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
252
+
253
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
254
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
255
+ if use_cache:
256
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
257
+ -1, 1
258
+ )
259
+ else:
260
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
261
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
262
+ distance = position_ids_l - position_ids_r
263
+
264
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
265
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
266
+
267
+ if self.position_embedding_type == "relative_key":
268
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
269
+ attention_scores = attention_scores + relative_position_scores
270
+ elif self.position_embedding_type == "relative_key_query":
271
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
272
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
273
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
274
+
275
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
276
+ if attention_mask is not None:
277
+ # Apply the attention mask is (precomputed for all layers in CamembertModel forward() function)
278
+ attention_scores = attention_scores + attention_mask
279
+
280
+ # Normalize the attention scores to probabilities.
281
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
282
+
283
+ # This is actually dropping out entire tokens to attend to, which might
284
+ # seem a bit unusual, but is taken from the original Transformer paper.
285
+ attention_probs = self.dropout(attention_probs)
286
+
287
+ # Mask heads if we want to
288
+ if head_mask is not None:
289
+ attention_probs = attention_probs * head_mask
290
+
291
+ context_layer = torch.matmul(attention_probs, value_layer)
292
+
293
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
294
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
295
+ context_layer = context_layer.view(new_context_layer_shape)
296
+
297
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
298
+
299
+ if self.is_decoder:
300
+ outputs = outputs + (past_key_value,)
301
+ return outputs
302
+
303
+
304
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSdpaSelfAttention with Roberta->Camembert
305
+ class CamembertSdpaSelfAttention(CamembertSelfAttention):
306
+ def __init__(self, config, position_embedding_type=None):
307
+ super().__init__(config, position_embedding_type=position_embedding_type)
308
+ self.dropout_prob = config.attention_probs_dropout_prob
309
+ self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse("2.2.0")
310
+
311
+ # Adapted from CamembertSelfAttention
312
+ def forward(
313
+ self,
314
+ hidden_states: torch.Tensor,
315
+ attention_mask: Optional[torch.Tensor] = None,
316
+ head_mask: Optional[torch.FloatTensor] = None,
317
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
318
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
319
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
320
+ output_attentions: Optional[bool] = False,
321
+ ) -> Tuple[torch.Tensor]:
322
+ if self.position_embedding_type != "absolute" or output_attentions or head_mask is not None:
323
+ # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once implemented.
324
+ logger.warning_once(
325
+ "CamembertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support "
326
+ "non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to "
327
+ "the manual attention implementation, but specifying the manual implementation will be required from "
328
+ "Transformers version v5.0.0 onwards. This warning can be removed using the argument "
329
+ '`attn_implementation="eager"` when loading the model.'
330
+ )
331
+ return super().forward(
332
+ hidden_states,
333
+ attention_mask,
334
+ head_mask,
335
+ encoder_hidden_states,
336
+ encoder_attention_mask,
337
+ past_key_value,
338
+ output_attentions,
339
+ )
340
+
341
+ bsz, tgt_len, _ = hidden_states.size()
342
+
343
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
344
+
345
+ # If this is instantiated as a cross-attention module, the keys and values come from an encoder; the attention
346
+ # mask needs to be such that the encoder's padding tokens are not attended to.
347
+ is_cross_attention = encoder_hidden_states is not None
348
+
349
+ current_states = encoder_hidden_states if is_cross_attention else hidden_states
350
+ attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
351
+
352
+ # Check `seq_length` of `past_key_value` == `len(current_states)` to support prefix tuning
353
+ if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]:
354
+ key_layer, value_layer = past_key_value
355
+ else:
356
+ key_layer = self.transpose_for_scores(self.key(current_states))
357
+ value_layer = self.transpose_for_scores(self.value(current_states))
358
+ if past_key_value is not None and not is_cross_attention:
359
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
360
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
361
+
362
+ if self.is_decoder:
363
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
364
+ # Further calls to cross_attention layer can then reuse all cross-attention
365
+ # key/value_states (first "if" case)
366
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
367
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
368
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
369
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
370
+ past_key_value = (key_layer, value_layer)
371
+
372
+ # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
373
+ # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
374
+ # Reference: https://github.com/pytorch/pytorch/issues/112577
375
+ if self.require_contiguous_qkv and query_layer.device.type == "cuda" and attention_mask is not None:
376
+ query_layer = query_layer.contiguous()
377
+ key_layer = key_layer.contiguous()
378
+ value_layer = value_layer.contiguous()
379
+
380
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
381
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
382
+ # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create
383
+ # a causal mask in case tgt_len == 1.
384
+ is_causal = (
385
+ True if self.is_decoder and not is_cross_attention and attention_mask is None and tgt_len > 1 else False
386
+ )
387
+
388
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
389
+ query_layer,
390
+ key_layer,
391
+ value_layer,
392
+ attn_mask=attention_mask,
393
+ dropout_p=self.dropout_prob if self.training else 0.0,
394
+ is_causal=is_causal,
395
+ )
396
+
397
+ attn_output = attn_output.transpose(1, 2)
398
+ attn_output = attn_output.reshape(bsz, tgt_len, self.all_head_size)
399
+
400
+ outputs = (attn_output,)
401
+ if self.is_decoder:
402
+ outputs = outputs + (past_key_value,)
403
+ return outputs
404
+
405
+
406
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput with Roberta->Camembert
407
+ class CamembertSelfOutput(nn.Module):
408
+ def __init__(self, config):
409
+ super().__init__()
410
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
411
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
412
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
413
+
414
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
415
+ hidden_states = self.dense(hidden_states)
416
+ hidden_states = self.dropout(hidden_states)
417
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
418
+ return hidden_states
419
+
420
+
421
+ CAMEMBERT_SELF_ATTENTION_CLASSES = {
422
+ "eager": CamembertSelfAttention,
423
+ "sdpa": CamembertSdpaSelfAttention,
424
+ }
425
+
426
+
427
# Copied from transformers.models.roberta.modeling_roberta.RobertaAttention with Roberta->Camembert,ROBERTA->CAMEMBERT
class CamembertAttention(nn.Module):
    """Complete attention sub-layer: self-attention (eager or SDPA variant) followed by the output projection,
    dropout and residual LayerNorm. Also supports head pruning."""

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        # Select the self-attention implementation configured on the model.
        self.self = CAMEMBERT_SELF_ATTENTION_CLASSES[config._attn_implementation](
            config, position_embedding_type=position_embedding_type
        )
        self.output = CamembertSelfOutput(config)
        # Heads already removed, so repeated `prune_heads` calls account for earlier pruning.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from the query/key/value and output projections in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # Output projection is pruned along its input dimension (dim=1) to match the reduced head count.
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Returns (attention_output, [attention_probs], [past_key_value]) — extras mirror `self.self`'s outputs."""
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Residual + LayerNorm around the raw attention output.
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
477
+
478
+
479
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Roberta->Camembert
class CamembertIntermediate(nn.Module):
    """Feed-forward expansion: `hidden_size -> intermediate_size`, followed by the configured activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` is either a string key into ACT2FN or a callable used directly.
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
493
+
494
+
495
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Roberta->Camembert
class CamembertOutput(nn.Module):
    """Feed-forward contraction: `intermediate_size -> hidden_size`, with dropout and residual + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is the residual branch (the feed-forward sub-layer's input).
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
508
+
509
+
510
# Copied from transformers.models.roberta.modeling_roberta.RobertaLayer with Roberta->Camembert
class CamembertLayer(nn.Module):
    """One transformer layer: self-attention, optional cross-attention (decoder only), and the feed-forward block."""

    def __init__(self, config):
        super().__init__()
        # The feed-forward block may be applied in chunks along the sequence dimension to save memory.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = CamembertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            # Cross-attention always uses absolute position embeddings.
            self.crossattention = CamembertAttention(config, position_embedding_type="absolute")
        self.intermediate = CamembertIntermediate(config)
        self.output = CamembertOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Returns (layer_output, [self attn probs], [cross attn probs], [present_key_value if decoder])."""
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # Applied (possibly per-chunk) by `apply_chunking_to_forward` above.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
595
+
596
+
597
# Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->Camembert
class CamembertEncoder(nn.Module):
    """Stack of `config.num_hidden_layers` CamembertLayers with optional gradient checkpointing and KV caching."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([CamembertLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        if self.gradient_checkpointing and self.training:
            if use_cache:
                # Checkpointing recomputes forward passes, which is incompatible with reusing cached KV states.
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the input to this layer (so the collection also contains the embedding output).
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # Each decoder layer returns its KV cache as the last element of its output tuple.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple output drops any entries that were not requested (i.e. are None).
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
689
+
690
+
691
# Copied from transformers.models.bert.modeling_bert.BertPooler
class CamembertPooler(nn.Module):
    """Pools a sequence by passing the first token's hidden state through a tanh-activated dense layer."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # The first token (<s>, equivalent to [CLS]) serves as the aggregate sequence representation.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
705
+
706
+
707
class CamembertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CamembertConfig
    # Matches the `self.roberta` attribute name used by the task heads (checkpoints share RoBERTa's layout).
    base_model_prefix = "roberta"
    supports_gradient_checkpointing = True
    _supports_sdpa = True

    # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero so padding tokens contribute nothing.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # Identity-initialized LayerNorm: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
734
+
735
+
736
# Format string for the `forward` docstrings of the Camembert models; `{0}` is filled with the
# expected input shape (e.g. "batch_size, sequence_length") via `.format(...)` in the decorators below.
CAMEMBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
784
+
785
+
786
# Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Camembert
class CamembertClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Fall back to the generic hidden dropout when no classifier-specific rate is configured.
        dropout_p = config.classifier_dropout
        if dropout_p is None:
            dropout_p = config.hidden_dropout_prob
        self.dropout = nn.Dropout(dropout_p)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        # take <s> token (equivalent to [CLS]) as the sentence representation
        pooled = self.dropout(features[:, 0, :])
        pooled = torch.tanh(self.dense(pooled))
        return self.out_proj(self.dropout(pooled))
807
+
808
+
809
# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Camembert
class CamembertLMHead(nn.Module):
    """Camembert Head for masked language modeling."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Share the bias parameter with the decoder so resizing/tying keeps them consistent.
        self.decoder.bias = self.bias

    def forward(self, features, **kwargs):
        transformed = self.layer_norm(gelu(self.dense(features)))
        # project back to size of vocabulary with bias
        return self.decoder(transformed)

    def _tie_weights(self):
        # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
        # For accelerate compatibility and to not break backward compatibility
        if self.decoder.bias.device.type == "meta":
            self.decoder.bias = self.bias
        else:
            self.bias = self.decoder.bias
839
+
840
+
841
@add_start_docstrings(
    "The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.",
    CAMEMBERT_START_DOCSTRING,
)
class CamembertModel(CamembertPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in *Attention is
    all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
    Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to
    `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.

    .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762

    """

    _no_split_modules = []

    # Copied from transformers.models.roberta.modeling_roberta.RobertaModel.__init__ with Roberta->Camembert
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = CamembertEmbeddings(config)
        self.encoder = CamembertEncoder(config)

        # Pooler is optional so heads that don't need it can skip the extra parameters.
        self.pooler = CamembertPooler(config) if add_pooling_layer else None

        # Cached for the SDPA-vs-eager mask-format decision in `forward`.
        self.attn_implementation = config._attn_implementation
        self.position_embedding_type = config.position_embedding_type

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    # Copied from transformers.models.roberta.modeling_roberta.RobertaModel.forward
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, target_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            # KV caching only makes sense for decoder-style (causal) usage.
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                # Reuse the registered all-zeros buffer instead of allocating a fresh tensor.
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)

        # SDPA-format masks are only usable when nothing forces the eager attention path
        # (head masking, attention-probability outputs, or non-absolute position embeddings).
        use_sdpa_attention_masks = (
            self.attn_implementation == "sdpa"
            and self.position_embedding_type == "absolute"
            and head_mask is None
            and not output_attentions
        )

        # Expand the attention mask
        if use_sdpa_attention_masks and attention_mask.dim() == 2:
            # Expand the attention mask for SDPA.
            # [bsz, seq_len] -> [bsz, 1, seq_len, seq_len]
            if self.config.is_decoder:
                extended_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                    attention_mask,
                    input_shape,
                    embedding_output,
                    past_key_values_length,
                )
            else:
                extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    attention_mask, embedding_output.dtype, tgt_len=seq_length
                )
        else:
            # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
            # ourselves in which case we just need to make it broadcastable to all heads.
            extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)

            if use_sdpa_attention_masks and encoder_attention_mask.dim() == 2:
                # Expand the attention mask for SDPA.
                # [bsz, seq_len] -> [bsz, 1, seq_len, seq_len]
                encoder_extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    encoder_attention_mask, embedding_output.dtype, tgt_len=seq_length
                )
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
1061
+
1062
+
1063
@add_start_docstrings(
    """CamemBERT Model with a `language modeling` head on top.""",
    CAMEMBERT_START_DOCSTRING,
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM with Roberta->Camembert, ROBERTA->CAMEMBERT
class CamembertForMaskedLM(CamembertPreTrainedModel):
    # The LM head's decoder weights/bias are tied to the input word embeddings.
    _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `CamembertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        # No pooling layer: the MLM head consumes per-token hidden states.
        self.roberta = CamembertModel(config, add_pooling_layer=False)
        self.lm_head = CamembertLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        mask="<mask>",
        expected_output="' Paris'",
        expected_loss=0.1,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(prediction_scores.device)
            # -100 labels are ignored by CrossEntropyLoss's default ignore_index.
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            # Skip index 1 (pooled output slot) of the base model's tuple output.
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1159
+
1160
+
1161
@add_start_docstrings(
    """
    CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    CAMEMBERT_START_DOCSTRING,
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification with Roberta->Camembert, ROBERTA->CAMEMBERT
class CamembertForSequenceClassification(CamembertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        # NOTE(review): the backbone attribute is named `roberta` (not `camembert`);
        # presumably kept from the copied RoBERTa implementation for checkpoint
        # weight-name compatibility — confirm before renaming.
        # No pooling layer: the classification head consumes the raw sequence output.
        self.roberta = CamembertModel(config, add_pooling_layer=False)
        self.classifier = CamembertClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint="cardiffnlp/twitter-roberta-base-emotion",
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output="'optimism'",
        expected_loss=0.08,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            # Infer the problem type once (and cache it on the config) from the
            # label shape/dtype when the user did not set it explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                # Multi-label: independent sigmoid per class, float labels expected.
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            # Tuple output: (loss?, logits, *extras) — extras skip the pooled output slot.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1259
+
1260
+
1261
@add_start_docstrings(
    """
    CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    CAMEMBERT_START_DOCSTRING,
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaForMultipleChoice with Roberta->Camembert, ROBERTA->CAMEMBERT
class CamembertForMultipleChoice(CamembertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        # NOTE(review): attribute named `roberta` for checkpoint weight-name
        # compatibility with the copied RoBERTa implementation — confirm.
        # Pooling layer kept: the choice score is computed from the pooled output.
        self.roberta = CamembertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One scalar score per choice; scores are reshaped to (batch, num_choices).
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(
        CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, num_choices, seq) -> (batch * num_choices, seq) so each
        # choice is encoded as an independent sequence by the backbone.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.roberta(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten back to one score per choice per example.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(reshaped_logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1354
+
1355
+
1356
@add_start_docstrings(
    """
    CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    CAMEMBERT_START_DOCSTRING,
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification with Roberta->Camembert, ROBERTA->CAMEMBERT
class CamembertForTokenClassification(CamembertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # No pooling layer: a label is predicted per token from the sequence output.
        self.roberta = CamembertModel(config, add_pooling_layer=False)
        # Dedicated classifier dropout when configured, otherwise fall back to the
        # generic hidden dropout probability.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint="Jean-Baptiste/roberta-large-ner-english",
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']",
        expected_loss=0.01,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            # Flatten (batch, seq, num_labels) / (batch, seq) for per-token CE loss.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1440
+
1441
+
1442
@add_start_docstrings(
    """
    CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`
    """,
    CAMEMBERT_START_DOCSTRING,
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering with Roberta->Camembert, ROBERTA->CAMEMBERT
class CamembertForQuestionAnswering(CamembertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.roberta = CamembertModel(config, add_pooling_layer=False)
        # Projects each token's hidden state to two logits: span start and span end.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint="deepset/roberta-base-squad2",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output="' puppet'",
        expected_loss=0.86,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Split the two-channel output into per-token start and end logits.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # Out-of-range positions are clamped to `sequence_length`, which is then
            # used as CrossEntropyLoss's ignore_index so they contribute no loss.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Total loss is the mean of start- and end-position losses.
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1542
+
1543
+
1544
@add_start_docstrings(
    """CamemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", CAMEMBERT_START_DOCSTRING
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with Roberta->Camembert, ROBERTA->CAMEMBERT, FacebookAI/roberta-base->almanach/camembert-base
class CamembertForCausalLM(CamembertPreTrainedModel, GenerationMixin):
    # LM head decoder weights are tied to the input embeddings.
    _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `CamembertLMHeadModel` as a standalone, add `is_decoder=True.`")

        self.roberta = CamembertModel(config, add_pooling_layer=False)
        self.lm_head = CamembertLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        r"""
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CamembertForCausalLM, AutoConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
        >>> config = AutoConfig.from_pretrained("almanach/camembert-base")
        >>> config.is_decoder = True
        >>> model = CamembertForCausalLM.from_pretrained("almanach/camembert-base", config=config)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Caching is pointless during training with labels; disable it.
        if labels is not None:
            use_cache = False

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        lm_loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(prediction_scores.device)
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def _reorder_cache(self, past_key_values, beam_idx):
        # Reorder each layer's cached key/value states along the batch dimension
        # to follow beam-search hypothesis reordering.
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
1684
+
1685
+
1686
# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids (`torch.Tensor`): Token ids of shape `(batch_size, sequence_length)`.
        padding_idx (`int`): Id of the padding token; padding positions keep this value.
        past_key_values_length (`int`, *optional*, defaults to 0):
            Length of the cached prefix; offsets the positions of the new tokens.

    Returns:
        `torch.Tensor`: Position ids of the same shape as `input_ids`.
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    # Cumulative count of non-padding tokens gives 1-based positions; multiplying by
    # the mask zeroes out padding slots again before the padding_idx offset is added.
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx
1701
+
1702
+
1703
# Public API of this module (consumed by the package's lazy-import machinery).
__all__ = [
    "CamembertForCausalLM",
    "CamembertForMaskedLM",
    "CamembertForMultipleChoice",
    "CamembertForQuestionAnswering",
    "CamembertForSequenceClassification",
    "CamembertForTokenClassification",
    "CamembertModel",
    "CamembertPreTrainedModel",
]
vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Static type checkers resolve the real symbols eagerly.
    from .configuration_decision_transformer import *
    from .modeling_decision_transformer import *
else:
    import sys

    # At runtime, replace this module in sys.modules with a lazy proxy so the
    # heavy modeling code is only imported when one of its attributes is accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (582 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/configuration_decision_transformer.cpython-310.pyc ADDED
Binary file (6.09 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/__pycache__/modeling_decision_transformer.cpython-310.pyc ADDED
Binary file (25.7 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/configuration_decision_transformer.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Decision Transformer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
class DecisionTransformerConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`DecisionTransformerModel`]. It is used to
    instantiate a Decision Transformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the standard
    DecisionTransformer architecture. Many of the config options are used to instantiate the GPT2 model that is used as
    part of the architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        state_dim (`int`, *optional*, defaults to 17):
            The state size for the RL environment
        act_dim (`int`, *optional*, defaults to 4):
            The size of the output action space
        hidden_size (`int`, *optional*, defaults to 128):
            The size of the hidden layers
        max_ep_len (`int`, *optional*, defaults to 4096):
            The maximum length of an episode in the environment
        action_tanh (`bool`, *optional*, defaults to True):
            Whether to use a tanh activation on action prediction
        vocab_size (`int`, *optional*, defaults to 1):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DecisionTransformerModel`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_layer (`int`, *optional*, defaults to 3):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 1):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. If unset, will default to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"relu"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`int`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by sqrt(hidden_size)..
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 50256):
            Id of the end-of-sequence token.
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / layer_idx + 1`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
            dot-product/softmax to float() when training with mixed precision.

    Example:

    ```python
    >>> from transformers import DecisionTransformerConfig, DecisionTransformerModel

    >>> # Initializing a DecisionTransformer configuration
    >>> configuration = DecisionTransformerConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = DecisionTransformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic PretrainedConfig attribute names onto the GPT-2-style ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        # Stored locally and also forwarded to the base class below.
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
155
+
156
+
157
+ __all__ = ["DecisionTransformerConfig"]
vlmpy310/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py ADDED
@@ -0,0 +1,963 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch DecisionTransformer model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import Callable, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
28
+ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
29
+ from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ logging,
35
+ replace_return_docstrings,
36
+ )
37
+ from .configuration_decision_transformer import DecisionTransformerConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
43
+ _CONFIG_FOR_DOC = "DecisionTransformerConfig"
44
+
45
+
46
# Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load tf checkpoints in a pytorch model"""
    try:
        import re

        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")

    # Read every variable out of the TF checkpoint.
    names, arrays = [], []
    for name, shape in tf.train.list_variables(tf_path):
        logger.info(f"Loading TF weight {name} with shape {shape}")
        names.append(name)
        arrays.append(tf.train.load_variable(tf_path, name).squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        # Walk the PyTorch module tree following the TF scope path.
        pointer = model
        for m_name in name.split("/"):
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                pointer = pointer[int(scope_names[1])]
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            # Attach the shapes to the exception for easier debugging.
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
101
+
102
+
103
# Copied from transformers.models.gpt2.modeling_gpt2.eager_attention_forward
def eager_attention_forward(module, query, key, value, attention_mask, head_mask=None, **kwargs):
    """Plain (non-fused) scaled dot-product attention with GPT-2 semantics.

    `module` supplies the scaling flags, the causal `bias` buffer and the
    attention-dropout layer. Returns `(attn_output, attn_weights)` where
    `attn_output` has heads moved back to dim 2 (not yet merged).
    """
    scores = torch.matmul(query, key.transpose(-1, -2))

    if module.scale_attn_weights:
        # Divide by sqrt(head_dim); use a 0-dim tensor to keep dtype/device consistent.
        scores = scores / torch.full(
            [], value.size(-1) ** 0.5, dtype=scores.dtype, device=scores.device
        )

    # Layer-wise attention scaling
    if module.scale_attn_by_inverse_layer_idx:
        scores = scores / float(module.layer_idx + 1)

    if not module.is_cross_attention:
        # Only the "normal" self-attention layer applies the causal mask.
        query_length, key_length = query.size(-2), key.size(-2)
        causal_mask = module.bias[:, :, key_length - query_length : key_length, :key_length]
        mask_value = torch.finfo(scores.dtype).min
        # Must be a tensor, otherwise: `RuntimeError: expected scalar type float but found double`.
        # Must also live on the same device as the scores.
        mask_value = torch.full([], mask_value, dtype=scores.dtype, device=scores.device)
        scores = torch.where(causal_mask, scores.to(scores.dtype), mask_value)

    if attention_mask is not None:
        # Additive attention mask (large negative values at masked positions).
        scores = scores + attention_mask

    probs = nn.functional.softmax(scores, dim=-1)

    # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- no-op otherwise.
    probs = probs.type(value.dtype)
    probs = module.attn_dropout(probs)

    # Zero out pruned/disabled heads if requested.
    if head_mask is not None:
        probs = probs * head_mask

    attn_output = torch.matmul(probs, value)
    attn_output = attn_output.transpose(1, 2)

    return attn_output, probs
144
+
145
+
146
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Attention(nn.Module):
    """GPT-2 style multi-head (self- or cross-) attention used by the Decision Transformer."""

    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()
        self.config = config
        max_positions = config.max_position_embeddings
        # Lower-triangular causal mask, registered as a non-persistent buffer.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
            persistent=False,
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention

        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn

        if self.is_cross_attention:
            # Cross-attention: queries come from the decoder, key/value from the encoder.
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            # Self-attention: one fused projection produces q, k and v.
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.is_causal = True

        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads, shrinking the fused qkv and output projections."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
        # The fused c_attn holds q, k, v side by side, so the same index applies three times.
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
        self.num_heads = self.num_heads - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
        # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
        bsz, num_heads, q_seq_len, dk = query.size()
        _, _, k_seq_len, _ = key.size()

        # Preallocate attn_weights for `baddbmm`
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)

        # Compute Scale Factor
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** 0.5

        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)

        # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
        with torch.amp.autocast(query.device.type, enabled=False):
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)

        if not self.is_cross_attention:
            # Only the "normal" self-attention layer applies the causal mask.
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Must be a tensor, otherwise: `RuntimeError: expected scalar type float but found double`.
            # Must also live on the same device as attn_weights.
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the additive attention mask.
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)
        attn_output = attn_output.transpose(1, 2)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
                )

            # Cross-attention path: queries from the decoder, key/value from the encoder.
            query_states = self.q_attn(hidden_states)
            key_states, value_states = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query_states, key_states, value_states = self.c_attn(hidden_states).split(self.split_size, dim=2)

        # Reshape to (batch, heads, seq, head_dim).
        shape_q = (*query_states.shape[:-1], -1, self.head_dim)
        shape_kv = (*key_states.shape[:-1], -1, self.head_dim)

        query_states = query_states.view(shape_q).transpose(1, 2)
        key_states = key_states.view(shape_kv).transpose(1, 2)
        value_states = value_states.view(shape_kv).transpose(1, 2)

        if layer_past is not None:
            # Prepend the cached keys/values along the sequence axis.
            past_key, past_value = layer_past
            key_states = torch.cat((past_key, key_states), dim=-2)
            value_states = torch.cat((past_value, value_states), dim=-2)

        if use_cache is True:
            present = (key_states, value_states)
        else:
            present = None

        is_cross_attention = encoder_hidden_states is not None
        is_causal = attention_mask is None and query_states.shape[-2] > 1 and not is_cross_attention

        using_eager = self.config._attn_implementation == "eager"
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and (output_attentions or head_mask is not None):
                using_eager = True
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                # Attention functions are consistent with previous equivalent attention classes, however they do not support some options
                # (e.g. layer scaling, head mask) that eager supports. These implementations are thus equivalent to previous code, but
                # not necessarily to eager (if mentionned options are provided).
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        if using_eager and self.reorder_and_upcast_attn:
            attn_output, attn_weights = self._upcast_and_reordered_attn(
                query_states, key_states, value_states, attention_mask, head_mask
            )
        else:
            attn_output, attn_weights = attention_interface(
                self,
                query_states,
                key_states,
                value_states,
                attention_mask,
                head_mask=head_mask,
                dropout=self.attn_dropout.p if self.training else 0.0,
                is_causal=is_causal,
                **kwargs,
            )

        # Merge heads back and project.
        attn_output = attn_output.reshape(*attn_output.shape[:-2], -1).contiguous()
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)
346
+
347
+
348
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2MLP(nn.Module):
    """GPT-2 feed-forward block: project up, apply the activation, project down, dropout."""

    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(intermediate_size, embed_dim)
        self.c_proj = Conv1D(embed_dim, intermediate_size)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        return self.dropout(hidden_states)
364
+
365
+
366
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Block(nn.Module):
    """One pre-LayerNorm GPT-2 transformer block: self-attention, optional cross-attention, MLP."""

    # Ignore copy
    def __init__(self, config, layer_idx=None):
        super().__init__()
        hidden_size = config.hidden_size
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size

        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        if config.add_cross_attention:
            self.crossattention = DecisionTransformerGPT2Attention(
                config, is_cross_attention=True, layer_idx=layer_idx
            )
            self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        # --- Self-attention sub-layer (pre-norm + residual) ---
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual

        # --- Optional cross-attention sub-layer ---
        if encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
                    "cross-attention layers by setting `config.add_cross_attention=True`"
                )
            residual = hidden_states
            hidden_states = self.ln_cross_attn(hidden_states)
            cross_attn_outputs = self.crossattention(
                hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = residual + attn_output
            outputs = outputs + cross_attn_outputs[2:]  # add cross attentions if we output attention weights

        # --- Feed-forward sub-layer (pre-norm + residual) ---
        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions, cross_attentions)
446
+
447
+
448
class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DecisionTransformerConfig
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"
    is_parallelizable = True
    supports_gradient_checkpointing = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
        # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
        # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
        # > -- GPT-2 :: https://openai.com/blog/better-language-models/
        #
        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
        for name, p in module.named_parameters():
            if "c_proj" in name and "weight" in name:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
489
+
490
+
491
class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):
    """Bare GPT-2 backbone (token + position embeddings, transformer blocks, final LayerNorm)."""

    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.hidden_size

        # Token and learned absolute position embeddings.
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList(
            [DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
        )
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        # Resolve option flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        # Determine how many positions the cache already covers.
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        # Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                    use_cache,
                    output_attentions,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)

            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
706
+
707
+
708
+ @dataclass
709
+ class DecisionTransformerOutput(ModelOutput):
710
+ """
711
+ Base class for model's outputs that also contains a pooling of the last hidden states.
712
+
713
+ Args:
714
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
715
+ Sequence of hidden-states at the output of the last layer of the model.
716
+ state_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, state_dim)`):
717
+ Environment state predictions
718
+ action_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, action_dim)`):
719
+ Model action predictions
720
+ return_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, 1)`):
721
+ Predicted returns for each state
722
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
723
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
724
+ shape `(batch_size, sequence_length, hidden_size)`.
725
+
726
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
727
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
728
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
729
+ sequence_length)`.
730
+
731
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
732
+ heads.
733
+ """
734
+
735
+ state_preds: torch.FloatTensor = None
736
+ action_preds: torch.FloatTensor = None
737
+ return_preds: torch.FloatTensor = None
738
+ hidden_states: torch.FloatTensor = None
739
+ attentions: torch.FloatTensor = None
740
+ last_hidden_state: torch.FloatTensor = None
741
+
742
+
743
+ class DecisionTransformerPreTrainedModel(PreTrainedModel):
744
+ """
745
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
746
+ models.
747
+ """
748
+
749
+ config_class = DecisionTransformerConfig
750
+ base_model_prefix = "decision_transformer"
751
+ main_input_name = "states"
752
+ supports_gradient_checkpointing = False
753
+
754
+ def _init_weights(self, module):
755
+ """Initialize the weights"""
756
+ if isinstance(module, nn.Linear):
757
+ # Slightly different from the TF version which uses truncated_normal for initialization
758
+ # cf https://github.com/pytorch/pytorch/pull/5617
759
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
760
+ if module.bias is not None:
761
+ module.bias.data.zero_()
762
+ elif isinstance(module, nn.Embedding):
763
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
764
+ if module.padding_idx is not None:
765
+ module.weight.data[module.padding_idx].zero_()
766
+ elif isinstance(module, nn.LayerNorm):
767
+ module.bias.data.zero_()
768
+ module.weight.data.fill_(1.0)
769
+
770
+
771
+ DECISION_TRANSFORMER_START_DOCSTRING = r"""
772
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
773
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
774
+ behavior.
775
+
776
+ Parameters:
777
+ config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.
778
+ Initializing with a config file does not load the weights associated with the model, only the
779
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
780
+ """
781
+
782
+ DECISION_TRANSFORMER_INPUTS_DOCSTRING = r"""
783
+ Args:
784
+ states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
785
+ The states for each step in the trajectory
786
+ actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
787
+ The actions taken by the "expert" policy for the current state, these are masked for auto regressive
788
+ prediction
789
+ rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
790
+ The rewards for each state, action
791
+ returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
792
+ The returns for each state in the trajectory
793
+ timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
794
+ The timestep for each step in the trajectory
795
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, episode_length)`):
796
+ Masking, used to mask the actions when performing autoregressive prediction
797
+ """
798
+
799
+
800
+ @add_start_docstrings("The Decision Transformer Model", DECISION_TRANSFORMER_START_DOCSTRING)
801
+ class DecisionTransformerModel(DecisionTransformerPreTrainedModel):
802
+ """
803
+
804
+ The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
805
+ setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
806
+
807
+ """
808
+
809
+ def __init__(self, config):
810
+ super().__init__(config)
811
+ self.config = config
812
+ self.hidden_size = config.hidden_size
813
+ # note: the only difference between this GPT2Model and the default Huggingface version
814
+ # is that the positional embeddings are removed (since we'll add those ourselves)
815
+ self.encoder = DecisionTransformerGPT2Model(config)
816
+
817
+ self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
818
+ self.embed_return = torch.nn.Linear(1, config.hidden_size)
819
+ self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
820
+ self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
821
+
822
+ self.embed_ln = nn.LayerNorm(config.hidden_size)
823
+
824
+ # note: we don't predict states or returns for the paper
825
+ self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
826
+ self.predict_action = nn.Sequential(
827
+ *([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
828
+ )
829
+ self.predict_return = torch.nn.Linear(config.hidden_size, 1)
830
+
831
+ # Initialize weights and apply final processing
832
+ self.post_init()
833
+
834
+ @add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
835
+ @replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
836
+ def forward(
837
+ self,
838
+ states: Optional[torch.FloatTensor] = None,
839
+ actions: Optional[torch.FloatTensor] = None,
840
+ rewards: Optional[torch.FloatTensor] = None,
841
+ returns_to_go: Optional[torch.FloatTensor] = None,
842
+ timesteps: Optional[torch.LongTensor] = None,
843
+ attention_mask: Optional[torch.FloatTensor] = None,
844
+ output_hidden_states: Optional[bool] = None,
845
+ output_attentions: Optional[bool] = None,
846
+ return_dict: Optional[bool] = None,
847
+ ) -> Union[Tuple[torch.FloatTensor], DecisionTransformerOutput]:
848
+ r"""
849
+ Returns:
850
+
851
+ Examples:
852
+
853
+ ```python
854
+ >>> from transformers import DecisionTransformerModel
855
+ >>> import torch
856
+
857
+ >>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
858
+ >>> # evaluation
859
+ >>> model = model.to(device)
860
+ >>> model.eval()
861
+
862
+ >>> env = gym.make("Hopper-v3")
863
+ >>> state_dim = env.observation_space.shape[0]
864
+ >>> act_dim = env.action_space.shape[0]
865
+
866
+ >>> state = env.reset()
867
+ >>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
868
+ >>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
869
+ >>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
870
+ >>> target_return = torch.tensor(TARGET_RETURN, dtype=torch.float32).reshape(1, 1)
871
+ >>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
872
+ >>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)
873
+
874
+ >>> # forward pass
875
+ >>> with torch.no_grad():
876
+ ... state_preds, action_preds, return_preds = model(
877
+ ... states=states,
878
+ ... actions=actions,
879
+ ... rewards=rewards,
880
+ ... returns_to_go=target_return,
881
+ ... timesteps=timesteps,
882
+ ... attention_mask=attention_mask,
883
+ ... return_dict=False,
884
+ ... )
885
+ ```"""
886
+
887
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
888
+ output_hidden_states = (
889
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
890
+ )
891
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
892
+
893
+ batch_size, seq_length = states.shape[0], states.shape[1]
894
+
895
+ if attention_mask is None:
896
+ # attention mask for GPT: 1 if can be attended to, 0 if not
897
+ attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
898
+
899
+ # embed each modality with a different head
900
+ state_embeddings = self.embed_state(states)
901
+ action_embeddings = self.embed_action(actions)
902
+ returns_embeddings = self.embed_return(returns_to_go)
903
+ time_embeddings = self.embed_timestep(timesteps)
904
+
905
+ # time embeddings are treated similar to positional embeddings
906
+ state_embeddings = state_embeddings + time_embeddings
907
+ action_embeddings = action_embeddings + time_embeddings
908
+ returns_embeddings = returns_embeddings + time_embeddings
909
+
910
+ # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
911
+ # which works nice in an autoregressive sense since states predict actions
912
+ stacked_inputs = (
913
+ torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
914
+ .permute(0, 2, 1, 3)
915
+ .reshape(batch_size, 3 * seq_length, self.hidden_size)
916
+ )
917
+ stacked_inputs = self.embed_ln(stacked_inputs)
918
+
919
+ # to make the attention mask fit the stacked inputs, have to stack it as well
920
+ stacked_attention_mask = (
921
+ torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
922
+ .permute(0, 2, 1)
923
+ .reshape(batch_size, 3 * seq_length)
924
+ )
925
+ device = stacked_inputs.device
926
+ # we feed in the input embeddings (not word indices as in NLP) to the model
927
+ encoder_outputs = self.encoder(
928
+ inputs_embeds=stacked_inputs,
929
+ attention_mask=stacked_attention_mask,
930
+ position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
931
+ output_attentions=output_attentions,
932
+ output_hidden_states=output_hidden_states,
933
+ return_dict=return_dict,
934
+ )
935
+ x = encoder_outputs[0]
936
+
937
+ # reshape x so that the second dimension corresponds to the original
938
+ # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
939
+ x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)
940
+
941
+ # get predictions
942
+ return_preds = self.predict_return(x[:, 2]) # predict next return given state and action
943
+ state_preds = self.predict_state(x[:, 2]) # predict next state given state and action
944
+ action_preds = self.predict_action(x[:, 1]) # predict next action given state
945
+ if not return_dict:
946
+ return (state_preds, action_preds, return_preds)
947
+
948
+ return DecisionTransformerOutput(
949
+ last_hidden_state=encoder_outputs.last_hidden_state,
950
+ state_preds=state_preds,
951
+ action_preds=action_preds,
952
+ return_preds=return_preds,
953
+ hidden_states=encoder_outputs.hidden_states,
954
+ attentions=encoder_outputs.attentions,
955
+ )
956
+
957
+
958
+ __all__ = [
959
+ "DecisionTransformerGPT2Model",
960
+ "DecisionTransformerGPT2PreTrainedModel",
961
+ "DecisionTransformerModel",
962
+ "DecisionTransformerPreTrainedModel",
963
+ ]
vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_ernie import *
22
+ from .modeling_ernie import *
23
+ else:
24
+ import sys
25
+
26
+ _file = globals()["__file__"]
27
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (537 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc ADDED
Binary file (6.8 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc ADDED
Binary file (52.9 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ERNIE model configuration"""
17
+
18
+ from collections import OrderedDict
19
+ from typing import Mapping
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class ErnieConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to
32
+ instantiate a ERNIE model according to the specified arguments, defining the model architecture. Instantiating a
33
+ configuration with the defaults will yield a similar configuration to that of the ERNIE
34
+ [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 30522):
42
+ Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ num_hidden_layers (`int`, *optional*, defaults to 12):
47
+ Number of hidden layers in the Transformer encoder.
48
+ num_attention_heads (`int`, *optional*, defaults to 12):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ intermediate_size (`int`, *optional*, defaults to 3072):
51
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
52
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
53
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
54
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
55
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
57
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout ratio for the attention probabilities.
59
+ max_position_embeddings (`int`, *optional*, defaults to 512):
60
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
61
+ just in case (e.g., 512 or 1024 or 2048).
62
+ type_vocab_size (`int`, *optional*, defaults to 2):
63
+ The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
64
+ task_type_vocab_size (`int`, *optional*, defaults to 3):
65
+ The vocabulary size of the `task_type_ids` for ERNIE2.0/ERNIE3.0 model
66
+ use_task_id (`bool`, *optional*, defaults to `False`):
67
+ Whether or not the model support `task_type_ids`
68
+ initializer_range (`float`, *optional*, defaults to 0.02):
69
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
71
+ The epsilon used by the layer normalization layers.
72
+ pad_token_id (`int`, *optional*, defaults to 0):
73
+ Padding token id.
74
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
75
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
76
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
77
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
78
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
79
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
80
+ use_cache (`bool`, *optional*, defaults to `True`):
81
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
82
+ relevant if `config.is_decoder=True`.
83
+ classifier_dropout (`float`, *optional*):
84
+ The dropout ratio for the classification head.
85
+
86
+ Examples:
87
+
88
+ ```python
89
+ >>> from transformers import ErnieConfig, ErnieModel
90
+
91
+ >>> # Initializing a ERNIE nghuyong/ernie-3.0-base-zh style configuration
92
+ >>> configuration = ErnieConfig()
93
+
94
+ >>> # Initializing a model (with random weights) from the nghuyong/ernie-3.0-base-zh style configuration
95
+ >>> model = ErnieModel(configuration)
96
+
97
+ >>> # Accessing the model configuration
98
+ >>> configuration = model.config
99
+ ```"""
100
+
101
+ model_type = "ernie"
102
+
103
+ def __init__(
104
+ self,
105
+ vocab_size=30522,
106
+ hidden_size=768,
107
+ num_hidden_layers=12,
108
+ num_attention_heads=12,
109
+ intermediate_size=3072,
110
+ hidden_act="gelu",
111
+ hidden_dropout_prob=0.1,
112
+ attention_probs_dropout_prob=0.1,
113
+ max_position_embeddings=512,
114
+ type_vocab_size=2,
115
+ task_type_vocab_size=3,
116
+ use_task_id=False,
117
+ initializer_range=0.02,
118
+ layer_norm_eps=1e-12,
119
+ pad_token_id=0,
120
+ position_embedding_type="absolute",
121
+ use_cache=True,
122
+ classifier_dropout=None,
123
+ **kwargs,
124
+ ):
125
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
126
+
127
+ self.vocab_size = vocab_size
128
+ self.hidden_size = hidden_size
129
+ self.num_hidden_layers = num_hidden_layers
130
+ self.num_attention_heads = num_attention_heads
131
+ self.hidden_act = hidden_act
132
+ self.intermediate_size = intermediate_size
133
+ self.hidden_dropout_prob = hidden_dropout_prob
134
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
135
+ self.max_position_embeddings = max_position_embeddings
136
+ self.type_vocab_size = type_vocab_size
137
+ self.task_type_vocab_size = task_type_vocab_size
138
+ self.use_task_id = use_task_id
139
+ self.initializer_range = initializer_range
140
+ self.layer_norm_eps = layer_norm_eps
141
+ self.position_embedding_type = position_embedding_type
142
+ self.use_cache = use_cache
143
+ self.classifier_dropout = classifier_dropout
144
+
145
+
146
+ class ErnieOnnxConfig(OnnxConfig):
147
+ @property
148
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
149
+ if self.task == "multiple-choice":
150
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
151
+ else:
152
+ dynamic_axis = {0: "batch", 1: "sequence"}
153
+ return OrderedDict(
154
+ [
155
+ ("input_ids", dynamic_axis),
156
+ ("attention_mask", dynamic_axis),
157
+ ("token_type_ids", dynamic_axis),
158
+ ("task_type_ids", dynamic_axis),
159
+ ]
160
+ )
161
+
162
+
163
+ __all__ = ["ErnieConfig", "ErnieOnnxConfig"]
vlmpy310/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py ADDED
@@ -0,0 +1,1815 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ERNIE model."""
16
+
17
+ import math
18
+ import warnings
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...generation import GenerationMixin
29
+ from ...modeling_outputs import (
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ CausalLMOutputWithCrossAttentions,
33
+ MaskedLMOutput,
34
+ MultipleChoiceModelOutput,
35
+ NextSentencePredictorOutput,
36
+ QuestionAnsweringModelOutput,
37
+ SequenceClassifierOutput,
38
+ TokenClassifierOutput,
39
+ )
40
+ from ...modeling_utils import PreTrainedModel
41
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
42
+ from ...utils import (
43
+ ModelOutput,
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from .configuration_ernie import ErnieConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "nghuyong/ernie-1.0-base-zh"
56
+ _CONFIG_FOR_DOC = "ErnieConfig"
57
+
58
+
59
+ class ErnieEmbeddings(nn.Module):
60
+ """Construct the embeddings from word, position and token_type embeddings."""
61
+
62
+ def __init__(self, config):
63
+ super().__init__()
64
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
65
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
66
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
67
+ self.use_task_id = config.use_task_id
68
+ if config.use_task_id:
69
+ self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size)
70
+
71
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
72
+ # any TensorFlow checkpoint file
73
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
74
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
75
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
76
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
77
+ self.register_buffer(
78
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
79
+ )
80
+ self.register_buffer(
81
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
82
+ )
83
+
84
+ def forward(
85
+ self,
86
+ input_ids: Optional[torch.LongTensor] = None,
87
+ token_type_ids: Optional[torch.LongTensor] = None,
88
+ task_type_ids: Optional[torch.LongTensor] = None,
89
+ position_ids: Optional[torch.LongTensor] = None,
90
+ inputs_embeds: Optional[torch.FloatTensor] = None,
91
+ past_key_values_length: int = 0,
92
+ ) -> torch.Tensor:
93
+ if input_ids is not None:
94
+ input_shape = input_ids.size()
95
+ else:
96
+ input_shape = inputs_embeds.size()[:-1]
97
+
98
+ seq_length = input_shape[1]
99
+
100
+ if position_ids is None:
101
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
102
+
103
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
104
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
105
+ # issue #5664
106
+ if token_type_ids is None:
107
+ if hasattr(self, "token_type_ids"):
108
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
109
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
110
+ token_type_ids = buffered_token_type_ids_expanded
111
+ else:
112
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
113
+
114
+ if inputs_embeds is None:
115
+ inputs_embeds = self.word_embeddings(input_ids)
116
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
117
+
118
+ embeddings = inputs_embeds + token_type_embeddings
119
+ if self.position_embedding_type == "absolute":
120
+ position_embeddings = self.position_embeddings(position_ids)
121
+ embeddings += position_embeddings
122
+
123
+ # add `task_type_id` for ERNIE model
124
+ if self.use_task_id:
125
+ if task_type_ids is None:
126
+ task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
127
+ task_type_embeddings = self.task_type_embeddings(task_type_ids)
128
+ embeddings += task_type_embeddings
129
+
130
+ embeddings = self.LayerNorm(embeddings)
131
+ embeddings = self.dropout(embeddings)
132
+ return embeddings
133
+
134
+
135
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Ernie
class ErnieSelfAttention(nn.Module):
    """Multi-head self-attention (eager implementation).

    Supports absolute or relative position embeddings, cross-attention when
    `encoder_hidden_states` is given, and decoder-style key/value caching via
    `past_key_value`.
    """

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        # `embedding_size` is only present on configs that decouple embedding and
        # hidden size; in that case the divisibility check is skipped.
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Explicit constructor argument wins over the config setting.
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            # One embedding per possible (signed) distance between two positions.
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        """Reshape `(batch, seq, all_head_size)` -> `(batch, heads, seq, head_size)`."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Return `(context,[attn_probs,][past_key_value])` depending on flags."""
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            # Prepend cached keys/values along the sequence dimension.
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                # With a cache the query is the single newest position: key_length - 1.
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            # Shift signed distances into [0, 2*max_position_embeddings - 2] for the lookup.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in ErnieModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # Back to (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
268
+
269
+
270
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Ernie
class ErnieSelfOutput(nn.Module):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        hidden = config.hidden_size
        self.dense = nn.Linear(hidden, hidden)
        self.LayerNorm = nn.LayerNorm(hidden, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        """Project the attention output and fold it back into the residual stream."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
283
+
284
+
285
# Dispatch table from `config._attn_implementation` to the attention class to
# instantiate; only the eager (pure PyTorch) implementation is available here.
ERNIE_SELF_ATTENTION_CLASSES = {
    "eager": ErnieSelfAttention,
}
288
+
289
+
290
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Ernie,BERT->ERNIE
class ErnieAttention(nn.Module):
    """Full attention sublayer: self-attention plus the residual output block,
    with support for pruning individual attention heads."""

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = ERNIE_SELF_ATTENTION_CLASSES[config._attn_implementation](
            config, position_embedding_type=position_embedding_type
        )
        self.output = ErnieSelfOutput(config)
        # Heads pruned so far; kept so repeated prune calls use correct indices.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given heads by slicing the q/k/v projections and the output dense layer."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # Output projection is pruned along its input dimension (dim=1).
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Run self-attention, then the residual output block; pass through extra outputs."""
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
340
+
341
+
342
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Ernie
class ErnieIntermediate(nn.Module):
    """First half of the feed-forward block: expand to `intermediate_size`, then activate."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a string key into ACT2FN or an activation callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
356
+
357
+
358
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Ernie
class ErnieOutput(nn.Module):
    """Second half of the feed-forward block: contract back to `hidden_size`,
    dropout, residual add, LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        """Fold the expanded FFN activations back into the residual stream."""
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
371
+
372
+
373
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Ernie
class ErnieLayer(nn.Module):
    """One transformer layer: self-attention, optional cross-attention (decoder),
    then a chunked feed-forward block.

    When `is_decoder`, the returned tuple ends with a `present_key_value` tuple of
    2 tensors (self-attn only) or 4 tensors (self-attn + cross-attn).
    """

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Feed-forward chunking splits along the sequence dimension.
        self.seq_len_dim = 1
        self.attention = ErnieAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            # Cross-attention always uses absolute positions regardless of config.
            self.crossattention = ErnieAttention(config, position_embedding_type="absolute")
        self.intermediate = ErnieIntermediate(config)
        self.output = ErnieOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Run one layer; output ordering mirrors BertLayer exactly."""
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        """FFN applied to one sequence chunk (used by `apply_chunking_to_forward`)."""
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
458
+
459
+
460
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Ernie
class ErnieEncoder(nn.Module):
    """Stack of `num_hidden_layers` ErnieLayer modules with optional gradient
    checkpointing, hidden-state/attention collection, and decoder KV caching."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ErnieLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        """Run the layer stack; return a tuple or a model-output dataclass."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        # Checkpointing recomputes activations in backward, which is incompatible
        # with returning cached key/values from forward.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Hidden states are collected *before* each layer (plus once after the loop).
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # The layer's present key/value tuple is always its last output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple output drops any accumulator that is still None.
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
552
+
553
+
554
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Ernie
class ErniePooler(nn.Module):
    """Summarize a sequence by passing its first ([CLS]) token through a
    tanh-activated dense layer."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Only the first token's hidden state is used as the sequence summary.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
568
+
569
+
570
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Ernie
class ErniePredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may be a string key into ACT2FN or an activation callable.
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
586
+
587
+
588
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Ernie
class ErnieLMPredictionHead(nn.Module):
    """MLM head: transform hidden states, then project onto the vocabulary.

    The decoder weight is tied to the input embeddings elsewhere; only the
    per-token output bias is specific to this head.
    """

    def __init__(self, config):
        super().__init__()
        self.transform = ErniePredictionHeadTransform(config)
        # Bias-free projection; its weight is shared with the input embeddings.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two attributes so `resize_token_embeddings` resizes the bias too.
        self.decoder.bias = self.bias

    def _tie_weights(self):
        """Re-link the bias after the decoder has been swapped or resized."""
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
610
+
611
+
612
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Ernie
class ErnieOnlyMLMHead(nn.Module):
    """Wraps the LM prediction head for models that only do masked-language modeling."""

    def __init__(self, config):
        super().__init__()
        self.predictions = ErnieLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        return self.predictions(sequence_output)
621
+
622
+
623
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Ernie
class ErnieOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
632
+
633
+
634
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Ernie
class ErniePreTrainingHeads(nn.Module):
    """Combined pre-training heads: MLM scores plus next-sentence-prediction scores."""

    def __init__(self, config):
        super().__init__()
        self.predictions = ErnieLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        """Return `(prediction_scores, seq_relationship_score)`."""
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
645
+
646
+
647
class ErniePreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ErnieConfig
    base_model_prefix = "ernie"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Plain normal init, slightly different from the TF version's
            # truncated normal (cf. https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # The padding vector must stay zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
672
+
673
+
674
@dataclass
# Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->Ernie
class ErnieForPreTrainingOutput(ModelOutput):
    """
    Output type of [`ErnieForPreTraining`].

    Args:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    # Logit fields default to None before the forward pass populates them,
    # so their annotations are Optional as well.
    prediction_logits: Optional[torch.FloatTensor] = None
    seq_relationship_logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
707
+
708
+
709
# Docstring templates injected into the model classes below via the
# `add_start_docstrings` / `add_start_docstrings_to_model_forward` decorators.
# `{0}` in the inputs template is filled with the expected input shape.
ERNIE_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`ErnieConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

ERNIE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        task_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size-1]
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
778
+
779
+
780
+ @add_start_docstrings(
781
+ "The bare Ernie Model transformer outputting raw hidden-states without any specific head on top.",
782
+ ERNIE_START_DOCSTRING,
783
+ )
784
+ class ErnieModel(ErniePreTrainedModel):
785
+ """
786
+
787
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
788
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
789
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
790
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
791
+
792
+ To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
793
+ to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
794
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
795
+ """
796
+
797
+ # Copied from transformers.models.clap.modeling_clap.ClapTextModel.__init__ with ClapText->Ernie
798
+ def __init__(self, config, add_pooling_layer=True):
799
+ super().__init__(config)
800
+ self.config = config
801
+
802
+ self.embeddings = ErnieEmbeddings(config)
803
+ self.encoder = ErnieEncoder(config)
804
+
805
+ self.pooler = ErniePooler(config) if add_pooling_layer else None
806
+
807
+ # Initialize weights and apply final processing
808
+ self.post_init()
809
+
810
+ # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings
811
+ def get_input_embeddings(self):
812
+ return self.embeddings.word_embeddings
813
+
814
+ # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings
815
+ def set_input_embeddings(self, value):
816
+ self.embeddings.word_embeddings = value
817
+
818
+ # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads
819
+ def _prune_heads(self, heads_to_prune):
820
+ """
821
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
822
+ class PreTrainedModel
823
+ """
824
+ for layer, heads in heads_to_prune.items():
825
+ self.encoder.layer[layer].attention.prune_heads(heads)
826
+
827
    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        # Resolve output-control flags, falling back to the model config when unset.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Key/value caching is only meaningful when the model is configured as a decoder.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        # Default to attending over the full (cached + current) sequence.
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        # Default token_type_ids from the registered all-zeros buffer when present,
        # otherwise build a fresh zeros tensor on the right device.
        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # Embed tokens (including the Ernie-specific task_type_ids), then run the encoder stack.
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            # Tuple layout: (sequence_output, pooled_output, *remaining encoder outputs).
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
964
+
965
+
966
@add_start_docstrings(
    """
    Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForPreTraining(ErniePreTrainedModel):
    # Keys of parameters tied to the input embeddings (resolved by the base-class weight-tying logic).
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        self.ernie = ErnieModel(config)
        self.cls = ErniePreTrainingHeads(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        # Keep the standalone bias attribute in sync with the new decoder's bias.
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=ErnieForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        next_sentence_label: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], ErnieForPreTrainingOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
            pair (see `input_ids` docstring) Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ErnieForPreTraining
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # outputs[0] = per-token hidden states (MLM head), outputs[1] = pooled output (NSP head).
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        # Pretraining loss = MLM loss + NSP loss; computed only when both label sets are given.
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return ErnieForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1080
+
1081
+
1082
@add_start_docstrings(
    """Ernie Model with a `language modeling` head on top for CLM fine-tuning.""", ERNIE_START_DOCSTRING
)
class ErnieForCausalLM(ErniePreTrainedModel, GenerationMixin):
    # Keys of parameters tied to the input embeddings (resolved by the base-class weight-tying logic).
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->ErnieForCausalLM,Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `ErnieForCausalLM` as a standalone, add `is_decoder=True.`")

        # No pooler: causal LM only needs per-token hidden states.
        self.ernie = ErnieModel(config, add_pooling_layer=False)
        self.cls = ErnieOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        # Keep the standalone bias attribute in sync with the new decoder's bias.
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # The cache is only useful for generation; disable it when a training loss is requested.
        if labels is not None:
            use_cache = False

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache
    def _reorder_cache(self, past_key_values, beam_idx):
        # Re-index each layer's cached states along the batch dimension so the
        # cache follows the selected beams during beam search.
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
1211
+
1212
+
1213
@add_start_docstrings("""Ernie Model with a `language modeling` head on top.""", ERNIE_START_DOCSTRING)
class ErnieForMaskedLM(ErniePreTrainedModel):
    # Keys of parameters tied to the input embeddings (resolved by the base-class weight-tying logic).
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `ErnieForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        # No pooler: MLM only needs per-token hidden states.
        self.ernie = ErnieModel(config, add_pooling_layer=False)
        self.cls = ErnieOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        # Keep the standalone bias attribute in sync with the new decoder's bias.
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output="'paris'",
        expected_loss=0.88,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.prepare_inputs_for_generation
    # with an added guard: the upstream version dereferences `attention_mask` even though it
    # defaults to None, raising AttributeError when no mask is supplied.
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append a dummy PAD token (and matching mask entry) so MLM-style generation has a slot to fill."""
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # add a dummy token
        if self.config.pad_token_id is None:
            raise ValueError("The PAD token should be defined for generation")

        if attention_mask is None:
            # No mask supplied: attend over every input token.
            attention_mask = input_ids.new_ones(input_shape)
        # Mask out the appended dummy position, then append the PAD token itself.
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
1325
+
1326
+
1327
@add_start_docstrings(
    """Ernie Model with a `next sentence prediction (classification)` head on top.""",
    ERNIE_START_DOCSTRING,
)
class ErnieForNextSentencePrediction(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForNextSentencePrediction.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        self.ernie = ErnieModel(config)
        self.cls = ErnieOnlyNSPHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")

        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```
        """

        # Legacy support: map the deprecated `next_sentence_label` kwarg onto `labels`.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
                " `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # The NSP head classifies the pooled output (outputs[1]) into 2 classes.
        pooled_output = outputs[1]

        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1430
+
1431
+
1432
@add_start_docstrings(
    """
    Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForSequenceClassification(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.ernie = ErnieModel(config)
        # Fall back to the generic hidden dropout when no classifier-specific rate is configured.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify from the pooled output (outputs[1]).
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels / label dtype and cache it on the
            # config, so subsequent calls reuse the same loss function.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1529
+
1530
+
1531
+ @add_start_docstrings(
1532
+ """
1533
+ Ernie Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1534
+ softmax) e.g. for RocStories/SWAG tasks.
1535
+ """,
1536
+ ERNIE_START_DOCSTRING,
1537
+ )
1538
+ class ErnieForMultipleChoice(ErniePreTrainedModel):
1539
    # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        self.ernie = ErnieModel(config)
        # Fall back to the generic hidden dropout when no classifier-specific rate is configured.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        # Single-logit head: one score per (flattened) choice; forward flattens the
        # (batch, num_choices, ...) inputs before running the encoder.
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()
1552
+
1553
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1554
+ @add_code_sample_docstrings(
1555
+ checkpoint=_CHECKPOINT_FOR_DOC,
1556
+ output_type=MultipleChoiceModelOutput,
1557
+ config_class=_CONFIG_FOR_DOC,
1558
+ )
1559
+ def forward(
1560
+ self,
1561
+ input_ids: Optional[torch.Tensor] = None,
1562
+ attention_mask: Optional[torch.Tensor] = None,
1563
+ token_type_ids: Optional[torch.Tensor] = None,
1564
+ task_type_ids: Optional[torch.Tensor] = None,
1565
+ position_ids: Optional[torch.Tensor] = None,
1566
+ head_mask: Optional[torch.Tensor] = None,
1567
+ inputs_embeds: Optional[torch.Tensor] = None,
1568
+ labels: Optional[torch.Tensor] = None,
1569
+ output_attentions: Optional[bool] = None,
1570
+ output_hidden_states: Optional[bool] = None,
1571
+ return_dict: Optional[bool] = None,
1572
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1573
+ r"""
1574
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1575
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1576
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1577
+ `input_ids` above)
1578
+ """
1579
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1580
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1581
+
1582
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1583
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1584
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1585
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1586
+ inputs_embeds = (
1587
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1588
+ if inputs_embeds is not None
1589
+ else None
1590
+ )
1591
+
1592
+ outputs = self.ernie(
1593
+ input_ids,
1594
+ attention_mask=attention_mask,
1595
+ token_type_ids=token_type_ids,
1596
+ task_type_ids=task_type_ids,
1597
+ position_ids=position_ids,
1598
+ head_mask=head_mask,
1599
+ inputs_embeds=inputs_embeds,
1600
+ output_attentions=output_attentions,
1601
+ output_hidden_states=output_hidden_states,
1602
+ return_dict=return_dict,
1603
+ )
1604
+
1605
+ pooled_output = outputs[1]
1606
+
1607
+ pooled_output = self.dropout(pooled_output)
1608
+ logits = self.classifier(pooled_output)
1609
+ reshaped_logits = logits.view(-1, num_choices)
1610
+
1611
+ loss = None
1612
+ if labels is not None:
1613
+ loss_fct = CrossEntropyLoss()
1614
+ loss = loss_fct(reshaped_logits, labels)
1615
+
1616
+ if not return_dict:
1617
+ output = (reshaped_logits,) + outputs[2:]
1618
+ return ((loss,) + output) if loss is not None else output
1619
+
1620
+ return MultipleChoiceModelOutput(
1621
+ loss=loss,
1622
+ logits=reshaped_logits,
1623
+ hidden_states=outputs.hidden_states,
1624
+ attentions=outputs.attentions,
1625
+ )
1626
+
1627
+
1628
@add_start_docstrings(
    """
    Ernie Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForTokenClassification(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Per-token classification only needs the sequence output, so the pooler is skipped.
        self.ernie = ErnieModel(config, add_pooling_layer=False)
        # Fall back to the generic hidden dropout rate when no classifier-specific rate is configured.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # outputs[0]: last hidden states, shape (batch, seq_len, hidden_size).
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Flatten tokens into one big batch; CrossEntropyLoss's default ignore_index (-100)
            # lets callers mask out padding/special tokens in `labels`.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1705
+
1706
+
1707
@add_start_docstrings(
    """
    Ernie Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForQuestionAnswering(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Span prediction works on per-token states, so the pooler is skipped.
        self.ernie = ErnieModel(config, add_pooling_layer=False)
        # Projects each token state to (start_logit, end_logit); num_labels is expected to be 2.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # (batch, seq_len, 2) -> two (batch, seq_len) tensors: start and end scores per token.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # Out-of-range targets are clamped to seq_len, which equals ignore_index below, so
            # they contribute nothing to the loss instead of raising an indexing error.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two span-boundary losses.
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1802
+
1803
+
1804
# Public API of this module, consumed by `from ... import *` and the lazy-import machinery.
__all__ = [
    "ErnieForCausalLM",
    "ErnieForMaskedLM",
    "ErnieForMultipleChoice",
    "ErnieForNextSentencePrediction",
    "ErnieForPreTraining",
    "ErnieForQuestionAnswering",
    "ErnieForSequenceClassification",
    "ErnieForTokenClassification",
    "ErnieModel",
    "ErniePreTrainedModel",
]
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Static type checkers see the real submodules so symbol resolution works.
    from .configuration_longformer import *
    from .modeling_longformer import *
    from .modeling_tf_longformer import *
    from .tokenization_longformer import *
    from .tokenization_longformer_fast import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules on first access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (661 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/configuration_longformer.cpython-310.pyc ADDED
Binary file (8.16 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/convert_longformer_original_pytorch_lightning_to_pytorch.cpython-310.pyc ADDED
Binary file (2.27 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc ADDED
Binary file (77.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_tf_longformer.cpython-310.pyc ADDED
Binary file (84 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer_fast.cpython-310.pyc ADDED
Binary file (9.44 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/configuration_longformer.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Longformer configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import TensorType, logging
23
+
24
+
25
+ if TYPE_CHECKING:
26
+ from ...onnx.config import PatchingSpec
27
+ from ...tokenization_utils_base import PreTrainedTokenizerBase
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
class LongformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LongformerModel`] or a [`TFLongformerModel`]. It
    is used to instantiate a Longformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the LongFormer
    [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) architecture with a sequence
    length 4,096.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Longformer model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`LongformerModel`] or
            [`TFLongformerModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        attention_window (`int` or `List[int]`, *optional*, defaults to 512):
            Size of an attention window around each token. If an `int`, use the same size for all layers. To specify a
            different window size for each layer, use a `List[int]` where `len(attention_window) == num_hidden_layers`.
        sep_token_id (`int`, *optional*, defaults to 2):
            The id of the separator token.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end-of-sequence token.
        onnx_export (`bool`, *optional*, defaults to `False`):
            Whether the model is being prepared for ONNX export (set by [`LongformerOnnxConfig`]).

    Example:

    ```python
    >>> from transformers import LongformerConfig, LongformerModel

    >>> # Initializing a Longformer configuration
    >>> configuration = LongformerConfig()

    >>> # Initializing a model from the configuration
    >>> model = LongformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
139
+
140
+
141
class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer, adding the `global_attention_mask` input."""

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Flag the model config for export; presumably consumed by the modeling code to pick an
        # ONNX-friendly code path — note this mutates the caller's config in place.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Declare dynamic axes; multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        # The base-model ("default") task additionally exposes the pooled output.
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        """
        What absolute tolerance value to use during model conversion validation.

        Returns:
            Float absolute tolerance value.
        """
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Generate dummy inputs for export, extending the base inputs with a global attention mask."""
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
202
+
203
+
204
# Public API of this module, consumed by `from ... import *` and the lazy-import machinery.
__all__ = ["LongformerConfig", "LongformerOnnxConfig"]
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert RoBERTa checkpoint."""
16
+
17
+ import argparse
18
+
19
+ import pytorch_lightning as pl
20
+ import torch
21
+ from torch import nn
22
+
23
+ from transformers import LongformerForQuestionAnswering, LongformerModel
24
+
25
+
26
class LightningModel(pl.LightningModule):
    """Minimal Lightning shim that mirrors the training-time module layout.

    It only exists so the checkpoint's `state_dict` keys (`model.*`, `qa_outputs.*`)
    line up and can be loaded with `load_state_dict`.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        # start/end span logits
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass
36
+
37
+
38
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Convert a PyTorch-Lightning Longformer QA checkpoint into a Transformers save folder.

    Args:
        longformer_model: Hub identifier of the base Longformer model
            (e.g. `longformer-base-4096` or `longformer-large-4096`).
        longformer_question_answering_ckpt_path: Path to the Lightning `.ckpt` file.
        pytorch_dump_folder_path: Output directory for `save_pretrained`.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    # Lightning checkpoints contain pickled objects beyond bare tensors, so the
    # `weights_only=True` default introduced in PyTorch 2.6 would reject them.
    # Only run this script on checkpoints from a trusted source (unpickling executes code).
    ckpt = torch.load(
        longformer_question_answering_ckpt_path, map_location=torch.device("cpu"), weights_only=False
    )
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights: encoder from the Lightning wrapper, QA head from its linear layer
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
60
+
61
+
62
if __name__ == "__main__":
    # CLI entry point: parse the three required paths/identifiers and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/modeling_longformer.py ADDED
The diff for this file is too large to render. See raw diff
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/modeling_tf_longformer.py ADDED
The diff for this file is too large to render. See raw diff
 
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer.py ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ import os
18
+ from functools import lru_cache
19
+ from typing import List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
31
+
32
+
33
@lru_cache()
def bytes_to_unicode():
    """
    Build the byte-level BPE alphabet: a dict mapping every byte value 0-255 to a unique,
    printable unicode character.

    Printable bytes (the basic latin visible range plus two latin-1 ranges) map to themselves;
    every remaining byte is assigned a character starting at code point 256. Using only
    printable characters keeps whitespace/control bytes out of the vocabulary, which the BPE
    merge code cannot handle. The result is cached since it is a pure constant table.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    # Printable bytes keep their own code point.
    byte_to_codepoint = {byte: byte for byte in printable}
    # Every other byte gets the next free code point above 255, in ascending byte order.
    shift = 0
    for byte in range(256):
        if byte not in byte_to_codepoint:
            byte_to_codepoint[byte] = 256 + shift
            shift += 1
    return {byte: chr(codepoint) for byte, codepoint in byte_to_codepoint.items()}
57
+
58
+
59
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of symbols (each symbol is a variable-length string); the result
    contains one `(left, right)` tuple per adjacent pair.
    """
    left = word[0]
    pairs = set()
    for right in word[1:]:
        pairs.add((left, right))
        left = right
    return pairs
72
+
73
+
74
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer with FacebookAI/roberta-base->allenai/longformer-base-4096, RoBERTa->Longformer all-casing, RobertaTokenizer->LongformerTokenizer
75
+ class LongformerTokenizer(PreTrainedTokenizer):
76
+ """
77
+ Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
78
+
79
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
80
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
81
+
82
+ ```python
83
+ >>> from transformers import LongformerTokenizer
84
+
85
+ >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
86
+ >>> tokenizer("Hello world")["input_ids"]
87
+ [0, 31414, 232, 2]
88
+
89
+ >>> tokenizer(" Hello world")["input_ids"]
90
+ [0, 20920, 232, 2]
91
+ ```
92
+
93
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
94
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
95
+
96
+ <Tip>
97
+
98
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
99
+
100
+ </Tip>
101
+
102
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
103
+ this superclass for more information regarding those methods.
104
+
105
+ Args:
106
+ vocab_file (`str`):
107
+ Path to the vocabulary file.
108
+ merges_file (`str`):
109
+ Path to the merges file.
110
+ errors (`str`, *optional*, defaults to `"replace"`):
111
+ Paradigm to follow when decoding bytes to UTF-8. See
112
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
113
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
114
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
115
+
116
+ <Tip>
117
+
118
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
119
+ sequence. The token used is the `cls_token`.
120
+
121
+ </Tip>
122
+
123
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
124
+ The end of sequence token.
125
+
126
+ <Tip>
127
+
128
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
129
+ The token used is the `sep_token`.
130
+
131
+ </Tip>
132
+
133
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
134
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
135
+ sequence classification or for a text and a question for question answering. It is also used as the last
136
+ token of a sequence built with special tokens.
137
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
138
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
139
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
140
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
141
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
142
+ token instead.
143
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
144
+ The token used for padding, for example when batching sequences of different lengths.
145
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
146
+ The token used for masking values. This is the token used when training this model with masked language
147
+ modeling. This is the token which the model will try to predict.
148
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
149
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
150
+ other word. (Longformer tokenizer detect beginning of words by the preceding space).
151
+ """
152
+
153
+ vocab_files_names = VOCAB_FILES_NAMES
154
+ model_input_names = ["input_ids", "attention_mask"]
155
+
156
+ def __init__(
157
+ self,
158
+ vocab_file,
159
+ merges_file,
160
+ errors="replace",
161
+ bos_token="<s>",
162
+ eos_token="</s>",
163
+ sep_token="</s>",
164
+ cls_token="<s>",
165
+ unk_token="<unk>",
166
+ pad_token="<pad>",
167
+ mask_token="<mask>",
168
+ add_prefix_space=False,
169
+ **kwargs,
170
+ ):
171
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
172
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
173
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
174
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
175
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
176
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
177
+
178
+ # Mask token behave like a normal word, i.e. include the space before it
179
+ mask_token = (
180
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
181
+ if isinstance(mask_token, str)
182
+ else mask_token
183
+ )
184
+
185
+ # these special tokens are not part of the vocab.json, let's add them in the correct order
186
+
187
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
188
+ self.encoder = json.load(vocab_handle)
189
+ self.decoder = {v: k for k, v in self.encoder.items()}
190
+ self.errors = errors # how to handle errors in decoding
191
+ self.byte_encoder = bytes_to_unicode()
192
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
193
+ with open(merges_file, encoding="utf-8") as merges_handle:
194
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
195
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
196
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
197
+ self.cache = {}
198
+ self.add_prefix_space = add_prefix_space
199
+
200
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
201
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
202
+
203
+ super().__init__(
204
+ errors=errors,
205
+ bos_token=bos_token,
206
+ eos_token=eos_token,
207
+ unk_token=unk_token,
208
+ sep_token=sep_token,
209
+ cls_token=cls_token,
210
+ pad_token=pad_token,
211
+ mask_token=mask_token,
212
+ add_prefix_space=add_prefix_space,
213
+ **kwargs,
214
+ )
215
+
216
+ @property
217
+ def vocab_size(self):
218
+ return len(self.encoder)
219
+
220
+ def get_vocab(self):
221
+ vocab = dict(self.encoder).copy()
222
+ vocab.update(self.added_tokens_encoder)
223
+ return vocab
224
+
225
+ def bpe(self, token):
226
+ if token in self.cache:
227
+ return self.cache[token]
228
+ word = tuple(token)
229
+ pairs = get_pairs(word)
230
+
231
+ if not pairs:
232
+ return token
233
+
234
+ while True:
235
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
236
+ if bigram not in self.bpe_ranks:
237
+ break
238
+ first, second = bigram
239
+ new_word = []
240
+ i = 0
241
+ while i < len(word):
242
+ try:
243
+ j = word.index(first, i)
244
+ except ValueError:
245
+ new_word.extend(word[i:])
246
+ break
247
+ else:
248
+ new_word.extend(word[i:j])
249
+ i = j
250
+
251
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
252
+ new_word.append(first + second)
253
+ i += 2
254
+ else:
255
+ new_word.append(word[i])
256
+ i += 1
257
+ new_word = tuple(new_word)
258
+ word = new_word
259
+ if len(word) == 1:
260
+ break
261
+ else:
262
+ pairs = get_pairs(word)
263
+ word = " ".join(word)
264
+ self.cache[token] = word
265
+ return word
266
+
267
+ def _tokenize(self, text):
268
+ """Tokenize a string."""
269
+ bpe_tokens = []
270
+ for token in re.findall(self.pat, text):
271
+ token = "".join(
272
+ self.byte_encoder[b] for b in token.encode("utf-8")
273
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
274
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
275
+ return bpe_tokens
276
+
277
+ def _convert_token_to_id(self, token):
278
+ """Converts a token (str) in an id using the vocab."""
279
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
280
+
281
+ def _convert_id_to_token(self, index):
282
+ """Converts an index (integer) in a token (str) using the vocab."""
283
+ return self.decoder.get(index)
284
+
285
+ def convert_tokens_to_string(self, tokens):
286
+ """Converts a sequence of tokens (string) in a single string."""
287
+ text = "".join(tokens)
288
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
289
+ return text
290
+
291
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
292
+ if not os.path.isdir(save_directory):
293
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
294
+ return
295
+ vocab_file = os.path.join(
296
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
297
+ )
298
+ merge_file = os.path.join(
299
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
300
+ )
301
+
302
+ with open(vocab_file, "w", encoding="utf-8") as f:
303
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
304
+
305
+ index = 0
306
+ with open(merge_file, "w", encoding="utf-8") as writer:
307
+ writer.write("#version: 0.2\n")
308
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
309
+ if index != token_index:
310
+ logger.warning(
311
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
312
+ " Please check that the tokenizer is not corrupted!"
313
+ )
314
+ index = token_index
315
+ writer.write(" ".join(bpe_tokens) + "\n")
316
+ index += 1
317
+
318
+ return vocab_file, merge_file
319
+
320
+ def build_inputs_with_special_tokens(
321
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
322
+ ) -> List[int]:
323
+ """
324
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
325
+ adding special tokens. A Longformer sequence has the following format:
326
+
327
+ - single sequence: `<s> X </s>`
328
+ - pair of sequences: `<s> A </s></s> B </s>`
329
+
330
+ Args:
331
+ token_ids_0 (`List[int]`):
332
+ List of IDs to which the special tokens will be added.
333
+ token_ids_1 (`List[int]`, *optional*):
334
+ Optional second list of IDs for sequence pairs.
335
+
336
+ Returns:
337
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
338
+ """
339
+ if token_ids_1 is None:
340
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
341
+ cls = [self.cls_token_id]
342
+ sep = [self.sep_token_id]
343
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
344
+
345
+ def get_special_tokens_mask(
346
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
347
+ ) -> List[int]:
348
+ """
349
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
350
+ special tokens using the tokenizer `prepare_for_model` method.
351
+
352
+ Args:
353
+ token_ids_0 (`List[int]`):
354
+ List of IDs.
355
+ token_ids_1 (`List[int]`, *optional*):
356
+ Optional second list of IDs for sequence pairs.
357
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
358
+ Whether or not the token list is already formatted with special tokens for the model.
359
+
360
+ Returns:
361
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
362
+ """
363
+ if already_has_special_tokens:
364
+ return super().get_special_tokens_mask(
365
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
366
+ )
367
+
368
+ if token_ids_1 is None:
369
+ return [1] + ([0] * len(token_ids_0)) + [1]
370
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
371
+
372
+ def create_token_type_ids_from_sequences(
373
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
374
+ ) -> List[int]:
375
+ """
376
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not
377
+ make use of token type ids, therefore a list of zeros is returned.
378
+
379
+ Args:
380
+ token_ids_0 (`List[int]`):
381
+ List of IDs.
382
+ token_ids_1 (`List[int]`, *optional*):
383
+ Optional second list of IDs for sequence pairs.
384
+
385
+ Returns:
386
+ `List[int]`: List of zeros.
387
+ """
388
+ sep = [self.sep_token_id]
389
+ cls = [self.cls_token_id]
390
+
391
+ if token_ids_1 is None:
392
+ return len(cls + token_ids_0 + sep) * [0]
393
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
394
+
395
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
396
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
397
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
398
+ text = " " + text
399
+ return (text, kwargs)
400
+
401
+
402
+ __all__ = ["LongformerTokenizer"]
vlmpy310/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer_fast.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast Tokenization classes for Longformer."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils_base import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import logging
25
+ from .tokenization_longformer import LongformerTokenizer
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
31
+
32
+
33
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast with FacebookAI/roberta-base->allenai/longformer-base-4096, RoBERTa->Longformer all-casing, Roberta->Longformer
class LongformerTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" Longformer tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import LongformerTokenizerFast

    >>> tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
    >>> tokenizer("Hello world")["input_ids"]
    [0, 31414, 232, 2]

    >>> tokenizer(" Hello world")["input_ids"]
    [0, 20920, 232, 2]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (Longformer tokenizer detect beginning of words by the preceding space).
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether the post processing step should trim offsets to avoid including whitespaces.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = LongformerTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        # The mask token eats the preceding space (lstrip=True) so `<mask>` works in
        # the fill-mask pipeline; see the `mask_token` property below.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # If the serialized backend post-processor disagrees with the requested
        # `add_prefix_space`/`trim_offsets`, rebuild it with the requested values.
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                # "type" names the processors class (e.g. RobertaProcessing); the rest of
                # the state becomes its constructor kwargs.
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
        having been set.

        Longformer tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
        comprise the space before the *<mask>*.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """
        Overriding the default behavior of the mask token to have it eat the space before it.

        This is needed to preserve backward compatibility with all the previously used models based on Longformer.
        """
        # Mask token behave like a normal word, i.e. include the space before it
        # So we set lstrip to True
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # NOTE(review): `assert` is stripped under `python -O`; kept as-is to match the
        # upstream "Copied from" source this class must stay in sync with.
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Same pretokenized-input guard as `_batch_encode_plus`, for single encodings.
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # Delegates to the backend tokenizers model, which writes vocab.json/merges.txt.
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: `<s> A </s>`; pair: `<s> A </s></s> B </s>`.
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]


__all__ = ["LongformerTokenizerFast"]
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


# Standard transformers lazy-module boilerplate: static type checkers see the real
# star imports, while at runtime the package module is replaced in `sys.modules`
# by a `_LazyModule` that defers submodule imports until first attribute access.
if TYPE_CHECKING:
    from .configuration_musicgen import *
    from .modeling_musicgen import *
    from .processing_musicgen import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (577 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc ADDED
Binary file (9.47 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/convert_musicgen_transformers.cpython-310.pyc ADDED
Binary file (6.25 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc ADDED
Binary file (78.2 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc ADDED
Binary file (4.5 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/configuration_musicgen.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """MusicGen model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ..auto.configuration_auto import AutoConfig
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
class MusicgenDecoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`MusicgenDecoder`]. It is used to instantiate a
    MusicGen decoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MusicGen
    [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 2048):
            Vocabulary size of the MusicgenDecoder model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`MusicgenDecoder`].
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of decoder layers.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer block.
        ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically, set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_factor (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by all models)
        num_codebooks (`int`, *optional*, defaults to 4):
            The number of parallel codebooks forwarded to the model.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether input and output word embeddings should be tied.
        audio_channels (`int`, *optional*, defaults to `1`):
            Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a separate
            audio stream for the left/right output channels. Mono models generate a single audio stream output.
    """

    model_type = "musicgen_decoder"
    base_config_key = "decoder_config"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=2048,
        max_position_embeddings=2048,
        num_hidden_layers=24,
        ffn_dim=4096,
        num_attention_heads=16,
        layerdrop=0.0,
        use_cache=True,
        activation_function="gelu",
        hidden_size=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        initializer_factor=0.02,
        scale_embedding=False,
        num_codebooks=4,
        audio_channels=1,
        pad_token_id=2048,
        bos_token_id=2048,
        eos_token_id=None,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.ffn_dim = ffn_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.initializer_factor = initializer_factor
        self.layerdrop = layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.num_codebooks = num_codebooks

        # Only mono (1) and stereo (2) audio are supported.
        if audio_channels not in [1, 2]:
            raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.")
        self.audio_channels = audio_channels

        # Special-token ids and embedding tying are handled by PretrainedConfig.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
133
+
134
+
135
+ class MusicgenConfig(PretrainedConfig):
136
+ r"""
137
+ This is the configuration class to store the configuration of a [`MusicgenModel`]. It is used to instantiate a
138
+ MusicGen model according to the specified arguments, defining the text encoder, audio encoder and MusicGen decoder
139
+ configs.
140
+
141
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
142
+ documentation from [`PretrainedConfig`] for more information.
143
+
144
+ Args:
145
+ kwargs (*optional*):
146
+ Dictionary of keyword arguments. Notably:
147
+
148
+ - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
149
+ defines the text encoder config.
150
+ - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
151
+ defines the audio encoder config.
152
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
153
+ the decoder config.
154
+
155
+ Example:
156
+
157
+ ```python
158
+ >>> from transformers import (
159
+ ... MusicgenConfig,
160
+ ... MusicgenDecoderConfig,
161
+ ... T5Config,
162
+ ... EncodecConfig,
163
+ ... MusicgenForConditionalGeneration,
164
+ ... )
165
+
166
+ >>> # Initializing text encoder, audio encoder, and decoder model configurations
167
+ >>> text_encoder_config = T5Config()
168
+ >>> audio_encoder_config = EncodecConfig()
169
+ >>> decoder_config = MusicgenDecoderConfig()
170
+
171
+ >>> configuration = MusicgenConfig.from_sub_models_config(
172
+ ... text_encoder_config, audio_encoder_config, decoder_config
173
+ ... )
174
+
175
+ >>> # Initializing a MusicgenForConditionalGeneration (with random weights) from the facebook/musicgen-small style configuration
176
+ >>> model = MusicgenForConditionalGeneration(configuration)
177
+
178
+ >>> # Accessing the model configuration
179
+ >>> configuration = model.config
180
+ >>> config_text_encoder = model.config.text_encoder
181
+ >>> config_audio_encoder = model.config.audio_encoder
182
+ >>> config_decoder = model.config.decoder
183
+
184
+ >>> # Saving the model, including its configuration
185
+ >>> model.save_pretrained("musicgen-model")
186
+
187
+ >>> # loading model and config from pretrained folder
188
+ >>> musicgen_config = MusicgenConfig.from_pretrained("musicgen-model")
189
+ >>> model = MusicgenForConditionalGeneration.from_pretrained("musicgen-model", config=musicgen_config)
190
+ ```"""
191
+
192
+ model_type = "musicgen"
193
+ sub_configs = {
194
+ "text_encoder": AutoConfig,
195
+ "audio_encoder": AutoConfig,
196
+ "decoder": MusicgenDecoderConfig,
197
+ }
198
+ is_composition = True
199
+
200
+ def __init__(self, **kwargs):
201
+ super().__init__(**kwargs)
202
+ if "text_encoder" not in kwargs or "audio_encoder" not in kwargs or "decoder" not in kwargs:
203
+ raise ValueError("Config has to be initialized with text_encoder, audio_encoder and decoder config")
204
+
205
+ text_encoder_config = kwargs.pop("text_encoder")
206
+ text_encoder_model_type = text_encoder_config.pop("model_type")
207
+
208
+ audio_encoder_config = kwargs.pop("audio_encoder")
209
+ audio_encoder_model_type = audio_encoder_config.pop("model_type")
210
+
211
+ decoder_config = kwargs.pop("decoder")
212
+
213
+ self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config)
214
+ self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config)
215
+ self.decoder = MusicgenDecoderConfig(**decoder_config)
216
+ self.is_encoder_decoder = True
217
+
218
+ @classmethod
219
+ def from_sub_models_config(
220
+ cls,
221
+ text_encoder_config: PretrainedConfig,
222
+ audio_encoder_config: PretrainedConfig,
223
+ decoder_config: MusicgenDecoderConfig,
224
+ **kwargs,
225
+ ):
226
+ r"""
227
+ Instantiate a [`MusicgenConfig`] (or a derived class) from text encoder, audio encoder and decoder
228
+ configurations.
229
+
230
+ Returns:
231
+ [`MusicgenConfig`]: An instance of a configuration object
232
+ """
233
+
234
+ return cls(
235
+ text_encoder=text_encoder_config.to_dict(),
236
+ audio_encoder=audio_encoder_config.to_dict(),
237
+ decoder=decoder_config.to_dict(),
238
+ **kwargs,
239
+ )
240
+
241
+ @property
242
+ # This is a property because you might want to change the codec model on the fly
243
+ def sampling_rate(self):
244
+ return self.audio_encoder.sampling_rate
245
+
246
+
247
+ __all__ = ["MusicgenConfig", "MusicgenDecoderConfig"]
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/convert_musicgen_transformers.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MusicGen checkpoints from the original repository."""
16
+
17
+ import argparse
18
+ from pathlib import Path
19
+ from typing import Dict, OrderedDict, Tuple
20
+
21
+ import torch
22
+ from audiocraft.models import MusicGen
23
+
24
+ from transformers import (
25
+ AutoFeatureExtractor,
26
+ AutoTokenizer,
27
+ EncodecModel,
28
+ MusicgenDecoderConfig,
29
+ MusicgenForConditionalGeneration,
30
+ MusicgenProcessor,
31
+ T5EncoderModel,
32
+ )
33
+ from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
34
+ from transformers.utils import logging
35
+
36
+
37
+ logging.set_verbosity_info()
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
42
+
43
+
44
+ def rename_keys(name):
45
+ if "emb" in name:
46
+ name = name.replace("emb", "model.decoder.embed_tokens")
47
+ if "transformer" in name:
48
+ name = name.replace("transformer", "model.decoder")
49
+ if "cross_attention" in name:
50
+ name = name.replace("cross_attention", "encoder_attn")
51
+ if "linear1" in name:
52
+ name = name.replace("linear1", "fc1")
53
+ if "linear2" in name:
54
+ name = name.replace("linear2", "fc2")
55
+ if "norm1" in name:
56
+ name = name.replace("norm1", "self_attn_layer_norm")
57
+ if "norm_cross" in name:
58
+ name = name.replace("norm_cross", "encoder_attn_layer_norm")
59
+ if "norm2" in name:
60
+ name = name.replace("norm2", "final_layer_norm")
61
+ if "out_norm" in name:
62
+ name = name.replace("out_norm", "model.decoder.layer_norm")
63
+ if "linears" in name:
64
+ name = name.replace("linears", "lm_heads")
65
+ if "condition_provider.conditioners.description.output_proj" in name:
66
+ name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
67
+ return name
68
+
69
+
70
+ def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
71
+ """Function that takes the fairseq Musicgen state dict and renames it according to the HF
72
+ module names. It further partitions the state dict into the decoder (LM) state dict, and that for the
73
+ encoder-decoder projection."""
74
+ keys = list(state_dict.keys())
75
+ enc_dec_proj_state_dict = {}
76
+ for key in keys:
77
+ val = state_dict.pop(key)
78
+ key = rename_keys(key)
79
+ if "in_proj_weight" in key:
80
+ # split fused qkv proj
81
+ state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
82
+ state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
83
+ state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
84
+ elif "enc_to_dec_proj" in key:
85
+ enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
86
+ else:
87
+ state_dict[key] = val
88
+ return state_dict, enc_dec_proj_state_dict
89
+
90
+
91
+ def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
92
+ if checkpoint.endswith("small"):
93
+ # default config values
94
+ hidden_size = 1024
95
+ num_hidden_layers = 24
96
+ num_attention_heads = 16
97
+ elif checkpoint.endswith("medium"):
98
+ hidden_size = 1536
99
+ num_hidden_layers = 48
100
+ num_attention_heads = 24
101
+ elif checkpoint.endswith("large"):
102
+ hidden_size = 2048
103
+ num_hidden_layers = 48
104
+ num_attention_heads = 32
105
+ else:
106
+ raise ValueError(
107
+ "Checkpoint should be one of `['small', 'medium', 'large']` for the mono checkpoints, "
108
+ "`['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` "
109
+ f"for the stereo checkpoints, or a custom checkpoint with the checkpoint size as a suffix, got {checkpoint}."
110
+ )
111
+
112
+ if "stereo" in checkpoint:
113
+ audio_channels = 2
114
+ num_codebooks = 8
115
+ else:
116
+ audio_channels = 1
117
+ num_codebooks = 4
118
+
119
+ config = MusicgenDecoderConfig(
120
+ hidden_size=hidden_size,
121
+ ffn_dim=hidden_size * 4,
122
+ num_hidden_layers=num_hidden_layers,
123
+ num_attention_heads=num_attention_heads,
124
+ num_codebooks=num_codebooks,
125
+ audio_channels=audio_channels,
126
+ )
127
+ return config
128
+
129
+
130
+ @torch.no_grad()
131
+ def convert_musicgen_checkpoint(
132
+ checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu", safe_serialization=False
133
+ ):
134
+ fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
135
+ decoder_config = decoder_config_from_checkpoint(checkpoint)
136
+
137
+ decoder_state_dict = fairseq_model.lm.state_dict()
138
+ decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
139
+ decoder_state_dict, hidden_size=decoder_config.hidden_size
140
+ )
141
+
142
+ text_encoder = T5EncoderModel.from_pretrained("google-t5/t5-base")
143
+ audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
144
+ decoder = MusicgenForCausalLM(decoder_config).eval()
145
+
146
+ # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
147
+ missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
148
+
149
+ for key in missing_keys.copy():
150
+ if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
151
+ missing_keys.remove(key)
152
+
153
+ if len(missing_keys) > 0:
154
+ raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
155
+
156
+ if len(unexpected_keys) > 0:
157
+ raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
158
+
159
+ # init the composite model
160
+ model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
161
+
162
+ # load the pre-trained enc-dec projection (from the decoder state dict)
163
+ model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
164
+
165
+ # check we can do a forward pass
166
+ input_ids = torch.arange(0, 2 * decoder_config.num_codebooks, dtype=torch.long).reshape(2, -1)
167
+ decoder_input_ids = input_ids.reshape(2 * decoder_config.num_codebooks, -1)
168
+
169
+ with torch.no_grad():
170
+ logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
171
+
172
+ if logits.shape != (2 * decoder_config.num_codebooks, 1, 2048):
173
+ raise ValueError("Incorrect shape for logits")
174
+
175
+ # now construct the processor
176
+ tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
177
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
178
+ "facebook/encodec_32khz", padding_side="left", feature_size=decoder_config.audio_channels
179
+ )
180
+
181
+ processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
182
+
183
+ # set the appropriate bos/pad token ids
184
+ model.generation_config.decoder_start_token_id = 2048
185
+ model.generation_config.pad_token_id = 2048
186
+
187
+ # set other default generation config params
188
+ model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
189
+ model.generation_config.do_sample = True
190
+ model.generation_config.guidance_scale = 3.0
191
+
192
+ if pytorch_dump_folder is not None:
193
+ Path(pytorch_dump_folder).mkdir(exist_ok=True)
194
+ logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
195
+ model.save_pretrained(pytorch_dump_folder, safe_serialization=safe_serialization)
196
+ processor.save_pretrained(pytorch_dump_folder)
197
+
198
+ if repo_id:
199
+ logger.info(f"Pushing model {checkpoint} to {repo_id}")
200
+ model.push_to_hub(repo_id, safe_serialization=safe_serialization)
201
+ processor.push_to_hub(repo_id)
202
+
203
+
204
+ if __name__ == "__main__":
205
+ parser = argparse.ArgumentParser()
206
+ # Required parameters
207
+ parser.add_argument(
208
+ "--checkpoint",
209
+ default="small",
210
+ type=str,
211
+ help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: "
212
+ "`['small', 'medium', 'large']` for the mono checkpoints, "
213
+ "`['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` "
214
+ "for the stereo checkpoints, or a custom checkpoint with the checkpoint size as a suffix.",
215
+ )
216
+ parser.add_argument(
217
+ "--pytorch_dump_folder",
218
+ required=True,
219
+ default=None,
220
+ type=str,
221
+ help="Path to the output PyTorch model directory.",
222
+ )
223
+ parser.add_argument(
224
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
225
+ )
226
+ parser.add_argument(
227
+ "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
228
+ )
229
+ parser.add_argument(
230
+ "--safe_serialization",
231
+ action="store_true",
232
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).",
233
+ )
234
+
235
+ args = parser.parse_args()
236
+ convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/modeling_musicgen.py ADDED
The diff for this file is too large to render. See raw diff
 
vlmpy310/lib/python3.10/site-packages/transformers/models/musicgen/processing_musicgen.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Text/audio processor class for MusicGen
17
+ """
18
+
19
+ from typing import List, Optional
20
+
21
+ import numpy as np
22
+
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...utils import to_numpy
25
+
26
+
27
+ class MusicgenProcessor(ProcessorMixin):
28
+ r"""
29
+ Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor
30
+ class.
31
+
32
+ [`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`TTokenizer`]. See
33
+ [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.
34
+
35
+ Args:
36
+ feature_extractor (`EncodecFeatureExtractor`):
37
+ An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input.
38
+ tokenizer (`T5Tokenizer`):
39
+ An instance of [`T5Tokenizer`]. The tokenizer is a required input.
40
+ """
41
+
42
+ feature_extractor_class = "EncodecFeatureExtractor"
43
+ tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
44
+
45
+ def __init__(self, feature_extractor, tokenizer):
46
+ super().__init__(feature_extractor, tokenizer)
47
+ self.current_processor = self.feature_extractor
48
+ self._in_target_context_manager = False
49
+
50
+ def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
51
+ return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
52
+
53
+ def __call__(self, *args, **kwargs):
54
+ """
55
+ Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text`
56
+ argument to [`~T5Tokenizer.__call__`]. Please refer to the doctsring of the above two methods for more
57
+ information.
58
+ """
59
+ # For backward compatibility
60
+ if self._in_target_context_manager:
61
+ return self.current_processor(*args, **kwargs)
62
+
63
+ audio = kwargs.pop("audio", None)
64
+ sampling_rate = kwargs.pop("sampling_rate", None)
65
+ text = kwargs.pop("text", None)
66
+ if len(args) > 0:
67
+ audio = args[0]
68
+ args = args[1:]
69
+
70
+ if audio is None and text is None:
71
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
72
+
73
+ if text is not None:
74
+ inputs = self.tokenizer(text, **kwargs)
75
+
76
+ if audio is not None:
77
+ audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
78
+
79
+ if audio is None:
80
+ return inputs
81
+
82
+ elif text is None:
83
+ return audio_inputs
84
+
85
+ else:
86
+ inputs["input_values"] = audio_inputs["input_values"]
87
+ if "padding_mask" in audio_inputs:
88
+ inputs["padding_mask"] = audio_inputs["padding_mask"]
89
+ return inputs
90
+
91
+ def batch_decode(self, *args, **kwargs):
92
+ """
93
+ This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
94
+ from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
95
+ [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
96
+ """
97
+ audio_values = kwargs.pop("audio", None)
98
+ padding_mask = kwargs.pop("padding_mask", None)
99
+
100
+ if len(args) > 0:
101
+ audio_values = args[0]
102
+ args = args[1:]
103
+
104
+ if audio_values is not None:
105
+ return self._decode_audio(audio_values, padding_mask=padding_mask)
106
+ else:
107
+ return self.tokenizer.batch_decode(*args, **kwargs)
108
+
109
+ def decode(self, *args, **kwargs):
110
+ """
111
+ This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
112
+ docstring of this method for more information.
113
+ """
114
+ return self.tokenizer.decode(*args, **kwargs)
115
+
116
+ def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
117
+ """
118
+ This method strips any padding from the audio values to return a list of numpy audio arrays.
119
+ """
120
+ audio_values = to_numpy(audio_values)
121
+ bsz, channels, seq_len = audio_values.shape
122
+
123
+ if padding_mask is None:
124
+ return list(audio_values)
125
+
126
+ padding_mask = to_numpy(padding_mask)
127
+
128
+ # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
129
+ # token (so that the generated audio values are **not** treated as padded tokens)
130
+ difference = seq_len - padding_mask.shape[-1]
131
+ padding_value = 1 - self.feature_extractor.padding_value
132
+ padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
133
+
134
+ audio_values = audio_values.tolist()
135
+ for i in range(bsz):
136
+ sliced_audio = np.asarray(audio_values[i])[
137
+ padding_mask[i][None, :] != self.feature_extractor.padding_value
138
+ ]
139
+ audio_values[i] = sliced_audio.reshape(channels, -1)
140
+
141
+ return audio_values
142
+
143
+
144
+ __all__ = ["MusicgenProcessor"]
vlmpy310/lib/python3.10/site-packages/transformers/models/paligemma/__init__.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_paligemma import *
22
+ from .modeling_paligemma import *
23
+ from .processing_paligemma import *
24
+ else:
25
+ import sys
26
+
27
+ _file = globals()["__file__"]
28
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
vlmpy310/lib/python3.10/site-packages/transformers/models/paligemma/configuration_paligemma.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """PaliGemmamodel configuration"""
15
+
16
+ import warnings
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+ from ..auto import CONFIG_MAPPING, AutoConfig
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class PaliGemmaConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`PaliGemmaForConditionalGeneration`]. It is used to instantiate an
29
+ PaliGemmamodel according to the specified arguments, defining the model architecture. Instantiating a configuration
30
+ with the defaults will yield a similar configuration to that of the PaliGemma-2B.
31
+
32
+ e.g. [paligemma-hf/paligemma-2b](https://huggingface.co/paligemma-hf/paligemma-2b)
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ vision_config (`PaliGemmaVisionConfig`, *optional*):
39
+ Custom vision config or dict
40
+ text_config (`Union[AutoConfig, dict]`, *optional*):
41
+ The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`.
42
+ ignore_index (`int`, *optional*, defaults to -100):
43
+ The ignore index for the loss function.
44
+ image_token_index (`int`, *optional*, defaults to 256000):
45
+ The image token index to encode the image prompt.
46
+ vocab_size (`int`, *optional*, defaults to 257152):
47
+ Vocabulary size of the PaliGemmamodel. Defines the number of different tokens that can be represented by the
48
+ `inputs_ids` passed when calling [`~PaliGemmaForConditionalGeneration`]
49
+ projection_dim (`int`, *optional*, defaults to 2048):
50
+ Dimension of the multimodal projection space.
51
+ hidden_size (`int`, *optional*, defaults to 2048):
52
+ Dimension of the hidden layer of the Language model.
53
+
54
+ Example:
55
+
56
+ ```python
57
+ >>> from transformers import PaliGemmaForConditionalGeneration, PaliGemmaConfig, SiglipVisionConfig, GemmaConfig
58
+
59
+ >>> # Initializing a Siglip-like vision config
60
+ >>> vision_config = SiglipVisionConfig()
61
+
62
+ >>> # Initializing a PaliGemma config
63
+ >>> text_config = GemmaConfig()
64
+
65
+ >>> # Initializing a PaliGemma paligemma-3b-224 style configuration
66
+ >>> configuration = PaliGemmaConfig(vision_config, text_config)
67
+
68
+ >>> # Initializing a model from the paligemma-3b-224 style configuration
69
+ >>> model = PaliGemmaForConditionalGeneration(configuration)
70
+
71
+ >>> # Accessing the model configuration
72
+ >>> configuration = model.config
73
+ ```"""
74
+
75
+ model_type = "paligemma"
76
+ sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
77
+
78
+ def __init__(
79
+ self,
80
+ vision_config=None,
81
+ text_config=None,
82
+ ignore_index=-100,
83
+ image_token_index=256000,
84
+ vocab_size=257152,
85
+ projection_dim=2048,
86
+ hidden_size=2048,
87
+ **kwargs,
88
+ ):
89
+ self._ignore_index = ignore_index
90
+ self.image_token_index = image_token_index
91
+ self._vocab_size = vocab_size
92
+ self.projection_dim = projection_dim
93
+ self.hidden_size = hidden_size
94
+ self.vision_config = vision_config
95
+ self.is_encoder_decoder = False
96
+
97
+ if isinstance(self.vision_config, dict):
98
+ vision_config["model_type"] = (
99
+ vision_config["model_type"] if "model_type" in vision_config else "siglip_vision_model"
100
+ )
101
+ self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
102
+ elif vision_config is None:
103
+ self.vision_config = CONFIG_MAPPING["siglip_vision_model"](
104
+ intermediate_size=4096,
105
+ hidden_size=1152,
106
+ patch_size=14,
107
+ image_size=224,
108
+ num_hidden_layers=27,
109
+ num_attention_heads=16,
110
+ vocab_size=257152,
111
+ vision_use_head=False,
112
+ )
113
+
114
+ self.text_config = text_config
115
+ if isinstance(self.text_config, dict):
116
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "gemma"
117
+ self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
118
+ elif text_config is None:
119
+ self.text_config = CONFIG_MAPPING["gemma"](
120
+ hidden_size=2048,
121
+ num_hidden_layers=18,
122
+ intermediate_size=16384,
123
+ num_attention_heads=8,
124
+ num_key_value_heads=1,
125
+ is_encoder_decoder=False,
126
+ vocab_size=vocab_size,
127
+ )
128
+ self.text_config.num_image_tokens = (self.vision_config.image_size // self.vision_config.patch_size) ** 2
129
+ self.vision_config.projection_dim = projection_dim
130
+ super().__init__(**kwargs)
131
+
132
+ @property
133
+ def ignore_index(self):
134
+ warnings.warn(
135
+ "The `ignore_index` attribute is deprecated and will be removed in v4.47.",
136
+ FutureWarning,
137
+ )
138
+ return self._ignore_index
139
+
140
+ @ignore_index.setter
141
+ def ignore_index(self, value):
142
+ self._ignore_index = value
143
+
144
+ def to_dict(self):
145
+ output = super().to_dict()
146
+ output.pop("_ignore_index", None)
147
+ return output
148
+
149
+
150
+ __all__ = ["PaliGemmaConfig"]
vlmpy310/lib/python3.10/site-packages/transformers/models/paligemma/modeling_paligemma.py ADDED
@@ -0,0 +1,618 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch PaliGemmamodel."""
16
+
17
+ from dataclasses import dataclass
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+
24
+ from ...cache_utils import Cache, HybridCache, StaticCache
25
+ from ...generation import GenerationMixin
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...utils import (
28
+ ModelOutput,
29
+ add_start_docstrings,
30
+ add_start_docstrings_to_model_forward,
31
+ is_flash_attn_2_available,
32
+ logging,
33
+ replace_return_docstrings,
34
+ )
35
+ from .configuration_paligemma import PaliGemmaConfig
36
+
37
+
38
+ if is_flash_attn_2_available():
39
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
40
+
41
+ from ..auto import AutoModel, AutoModelForCausalLM
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _CONFIG_FOR_DOC = "PaliGemmaConfig"
47
+
48
+
49
+ # Adapted from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
50
+ # But Paligemma has no causal mask on prefix
51
+ def _prepare_4d_causal_attention_mask_with_cache_position(
52
+ attention_mask: torch.Tensor,
53
+ sequence_length: int,
54
+ target_length: int,
55
+ dtype: torch.dtype,
56
+ device: torch.device,
57
+ min_dtype: float,
58
+ cache_position: torch.Tensor,
59
+ batch_size: int,
60
+ is_training: bool = False,
61
+ token_type_ids: torch.Tensor = None,
62
+ **kwargs,
63
+ ):
64
+ """
65
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
66
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
67
+
68
+ Args:
69
+ attention_mask (`torch.Tensor`):
70
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
71
+ sequence_length (`int`):
72
+ The sequence length being processed.
73
+ target_length (`int`):
74
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
75
+ dtype (`torch.dtype`):
76
+ The dtype to use for the 4D attention mask.
77
+ device (`torch.device`):
78
+ The device to plcae the 4D attention mask on.
79
+ min_dtype (`float`):
80
+ The minimum value representable with the dtype `dtype`.
81
+ cache_position (`torch.Tensor`):
82
+ Indices depicting the position of the input sequence tokens in the sequence.
83
+ batch_size (`torch.Tensor`):
84
+ Batch size.
85
+ is_training (`bool`):
86
+ Whether the model is in training mode or in inference. The condition is checked by presence/absence of `token_type_ids/labels`
87
+ """
88
+ if attention_mask is not None and attention_mask.dim() == 4:
89
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
90
+ causal_mask = attention_mask
91
+ else:
92
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
93
+ # Causal diagonal mask only if training, otherwise attend to the whole prefix. Training-specific attn for prefix is handled below
94
+ if sequence_length != 1:
95
+ if is_training:
96
+ causal_mask = torch.triu(causal_mask, diagonal=1)
97
+ else:
98
+ causal_mask[:, :sequence_length] = 0.0
99
+
100
+ causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
101
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
102
+ if attention_mask is not None:
103
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
104
+ mask_length = attention_mask.shape[-1]
105
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
106
+ padding_mask = padding_mask == 0
107
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
108
+ padding_mask, min_dtype
109
+ )
110
+ # we are training thus we need to create a full mask on the image + prefix but causal on suffix
111
+ if is_training:
112
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
113
+ token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0
114
+ )
115
+ return causal_mask
116
+
117
+
118
@dataclass
class PaliGemmaCausalLMOutputWithPast(ModelOutput):
    """
    Base class for PaliGemma causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        image_hidden_states (`torch.FloatTensor`, *optional*):
            A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
            image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
    """

    # All fields default to None so the output can be built incrementally;
    # ModelOutput drops None entries when the result is converted to a tuple.
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None
156
+
157
+
158
class PaliGemmaMultiModalProjector(nn.Module):
    """Projects vision-tower features into the language model's embedding space."""

    def __init__(self, config: PaliGemmaConfig):
        super().__init__()
        # Single affine map: vision hidden size -> text projection dimension.
        self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)

    def forward(self, image_features):
        """Apply the linear projection to the image features and return the result."""
        return self.linear(image_features)
167
+
168
+
169
# Shared docstring prologue, prepended to PaliGemma model classes via `add_start_docstrings`.
PALIGEMMA_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`PaliGemmaConfig`] or [`PaliGemmaVisionConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
184
+
185
+
186
@add_start_docstrings(
    "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
    PALIGEMMA_START_DOCSTRING,
)
class PaliGemmaPreTrainedModel(PreTrainedModel):
    """Base class carrying weight initialization and feature-support flags for PaliGemma models."""

    config_class = PaliGemmaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PaliGemmaMultiModalProjector"]
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize `module`'s weights with a normal distribution, zeroing biases and padding rows."""
        # important: this ported version of PaliGemma isn't meant for training from scratch - only
        # inference and fine-tuning
        if hasattr(self.config, "initializer_range"):
            std = self.config.initializer_range
        else:
            # Fall back to the text sub-config when the top-level config has no initializer_range.
            std = self.config.text_config.initializer_range

        if hasattr(module, "class_embedding"):
            module.class_embedding.data.normal_(mean=0.0, std=std)

        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
222
+
223
+
224
# Shared forward-signature documentation, injected via `add_start_docstrings_to_model_forward`.
PALIGEMMA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):
            The tensors corresponding to the input images. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SiglipImageProcessor.__call__`] for details ([]`PaliGemmaProcessor`] uses
            [`SiglipImageProcessor`] for processing images).
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""
292
+
293
+
294
@add_start_docstrings(
    """The PALIGEMMA model which consists of a vision backbone and a language model.""",
    PALIGEMMA_START_DOCSTRING,
)
class PaliGemmaForConditionalGeneration(PaliGemmaPreTrainedModel, GenerationMixin):
    def __init__(self, config: PaliGemmaConfig):
        super().__init__(config)
        # Vision encoder producing per-patch features for the input image.
        self.vision_tower = AutoModel.from_config(config=config.vision_config)
        # Linear projection from vision hidden size to the language-model embedding size.
        self.multi_modal_projector = PaliGemmaMultiModalProjector(config)
        self.vocab_size = config.text_config.vocab_size

        language_model = AutoModelForCausalLM.from_config(config=config.text_config)

        if language_model._tied_weights_keys is not None:
            # Re-prefix the tied-weight keys so they resolve under `language_model.` in this wrapper.
            self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
        self.language_model = language_model

        # -1 is a sentinel meaning "no pad token configured".
        self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
        self.post_init()

    # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_input_embeddings with Llava->PaliGemma
    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_input_embeddings with Llava->PaliGemma
    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_output_embeddings with Llava->PaliGemma
    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_output_embeddings with Llava->PaliGemma
    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.set_decoder with Llava->PaliGemma
    def set_decoder(self, decoder):
        self.language_model.set_decoder(decoder)

    # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration.get_decoder with Llava->PaliGemma
    def get_decoder(self):
        return self.language_model.get_decoder()

    def _update_causal_mask(
        self,
        attention_mask,
        token_type_ids,
        past_key_values,
        cache_position,
        input_ids=None,
        inputs_embeds=None,
        is_training: bool = False,
    ):
        """Build the 4D causal mask for the language model, honoring PaliGemma's
        non-causal prefix (image + prompt) and the padded length of static caches."""
        if self.config.text_config._attn_implementation == "flash_attention_2":
            # Flash attention consumes the 2D mask directly; None means "no padding anywhere".
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        using_static_cache = isinstance(past_key_values, StaticCache)
        min_dtype = torch.finfo(self.dtype).min
        inputs_lead_dim = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
        sequence_length = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Static/Hybrid caches are pre-allocated, so the mask must span the full cache length.
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        elif isinstance(past_key_values, HybridCache):
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else cache_position[0] + sequence_length + 1
            )

        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            return attention_mask

        causal_mask = torch.full(
            (sequence_length, target_length), fill_value=min_dtype, dtype=self.dtype, device=cache_position.device
        )
        # Causal diagonal mask only if training, otherwise attend to the whole prefix. Training-specific attn for prefix is handled below
        if sequence_length != 1:
            if is_training:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            else:
                causal_mask[:, :sequence_length] = 0.0

        # Re-mask cache slots strictly beyond each query's cache position.
        causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
        causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
            mask_length = attention_mask.shape[-1]
            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
            padding_mask = padding_mask == 0
            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                padding_mask, min_dtype
            )
            # we are training thus we need to create a full mask on the image + prefix but causal on suffix
            if is_training:
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    token_type_ids[:, None, None, :].to(causal_mask.device) == 0, 0
                )
        return causal_mask

    def get_image_features(self, pixel_values: torch.FloatTensor):
        """
        Obtains image last hidden states from the vision tower and apply multimodal projection.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`)
                The tensors corresponding to the input images.
        Returns:
            image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
        """
        image_outputs = self.vision_tower(pixel_values)
        selected_image_feature = image_outputs.last_hidden_state
        image_features = self.multi_modal_projector(selected_image_feature)
        # Normalize by sqrt(hidden_size) so image embeddings match the scale of text embeddings.
        image_features = image_features / (self.config.text_config.hidden_size**0.5)
        return image_features

    @add_start_docstrings_to_model_forward(PALIGEMMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=PaliGemmaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        num_logits_to_keep: int = 0,
    ) -> Union[Tuple, PaliGemmaCausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.

            num_logits_to_keep (`int`, *optional*):
                Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
                `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
                token can save memory, which becomes pretty significant for long sequences or large vocabulary size.

        Returns:

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, PaliGemmaForConditionalGeneration

        >>> model = PaliGemmaForConditionalGeneration.from_pretrained("google/PaliGemma-test-224px-hf")
        >>> processor = AutoProcessor.from_pretrained("google/PaliGemma-test-224px-hf")

        >>> prompt = "answer en Where is the cow standing?"
        >>> url = "https://huggingface.co/gv-hf/PaliGemma-test-224px-hf/resolve/main/cow_beach_1.png"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_length=30)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "answer en Where is the cow standing?\nbeach"
        ```"""

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if pixel_values is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one"
            )

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Training mode is inferred from the presence of both token_type_ids and labels.
        is_training = token_type_ids is not None and labels is not None

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0) + 1  # Paligemma positions are 1-indexed

        # Merge text and images
        if pixel_values is not None:
            image_features = self.get_image_features(pixel_values)

            # Scatter projected image features into the embedding slots marked by the image token.
            special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
            special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
            if inputs_embeds[special_image_mask].numel() != image_features.numel():
                image_tokens_in_text = torch.sum(input_ids == self.config.image_token_index)
                raise ValueError(
                    f"Number of images does not match number of special image tokens in the input text. "
                    f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} "
                    "tokens from image embeddings."
                )
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        # mask out pad-token-ids in labels for BC
        if labels is not None and self.pad_token_id in labels:
            logger.warning_once(
                "`labels` contains `pad_token_id` which will be masked with `config.ignore_index`. "
                "You have to mask out `pad_token_id` when preparing `labels`, this behavior will be removed in v.4.46.",
            )
            labels = torch.where(input_ids == self.pad_token_id, self.config.ignore_index, labels)

        causal_mask = self._update_causal_mask(
            attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
        )
        outputs = self.language_model(
            attention_mask=causal_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            num_logits_to_keep=num_logits_to_keep,
        )

        logits = outputs.logits
        loss = None
        if labels is not None:
            # Upcast to float if we need to compute the loss to avoid potential precision issues
            logits = logits.float()
            shift_logits = logits[..., :-1, :]
            shift_labels = labels[..., 1:]
            if attention_mask is not None:
                # we use the input attention mask to shift the logits and labels, because it is 2D.
                # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
                shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
                shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous()
                shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous()
            else:
                shift_logits = shift_logits.contiguous()
                shift_labels = shift_labels.contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()

            flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
            flat_labels = shift_labels.view(-1).to(shift_logits.device)
            loss = loss_fct(flat_logits, flat_labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return PaliGemmaCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        pixel_values=None,
        attention_mask=None,
        token_type_ids=None,
        use_cache=True,
        num_logits_to_keep=None,
        labels=None,
        **kwargs,
    ):
        # Overwritten -- custom `position_ids` and `pixel_values` handling
        model_inputs = self.language_model.prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            cache_position=cache_position,
            use_cache=use_cache,
            num_logits_to_keep=num_logits_to_keep,
            token_type_ids=token_type_ids,
            **kwargs,
        )

        # position_ids in Paligemma are 1-indexed
        if model_inputs.get("position_ids") is not None:
            model_inputs["position_ids"] += 1
        # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
        # Otherwise we need pixel values to be passed to model. NOTE: use_cache=False needs pixel_values always
        if cache_position[0] == 0:
            model_inputs["pixel_values"] = pixel_values
        is_training = token_type_ids is not None and labels is not None
        # HybridCache needs the full 4D mask built up-front on the first (prefill) step.
        if cache_position[0] == 0 and isinstance(past_key_values, HybridCache):
            causal_mask = self._update_causal_mask(
                attention_mask, token_type_ids, past_key_values, cache_position, input_ids, inputs_embeds, is_training
            )
            model_inputs["attention_mask"] = causal_mask
        return model_inputs
616
+
617
+
618
# Explicit public API of this module (used by `import *` and the lazy-module machinery).
__all__ = ["PaliGemmaForConditionalGeneration", "PaliGemmaPreTrainedModel"]
vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Static type checkers resolve the real symbols directly.
    from .configuration_starcoder2 import *
    from .modeling_starcoder2 import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy submodules
    # are only imported when one of their attributes is first accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (552 Bytes). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/configuration_starcoder2.cpython-310.pyc ADDED
Binary file (9.4 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modeling_starcoder2.cpython-310.pyc ADDED
Binary file (34.2 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/__pycache__/modular_starcoder2.cpython-310.pyc ADDED
Binary file (7.94 kB). View file
 
vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/configuration_starcoder2.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Starcoder2 model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...modeling_rope_utils import rope_config_validation
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
class Starcoder2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Starcoder2Model`]. It is used to instantiate a
    Starcoder2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the [bigcode/starcoder2-7b](https://huggingface.co/bigcode/starcoder2-7b) model.


    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 49152):
            Vocabulary size of the Starcoder2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Starcoder2Model`]
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 30):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 24):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `2`.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with. Starcoder2's sliding window attention
            allows sequence of up to 4096*32 tokens.
        initializer_range (`float`, *optional*, defaults to 0.018042):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        norm_epsilon (`float`, *optional*, defaults to 1e-05):
            Epsilon value for the layer norm
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        bos_token_id (`int`, *optional*, defaults to 50256):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 50256):
            The id of the "end-of-sequence" token.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
            and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
            accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If not specified, will default to `None` (no sliding window).
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        residual_dropout (`float`, *optional*, defaults to 0.0):
            Residual connection dropout value.
        embedding_dropout (`float`, *optional*, defaults to 0.0):
            Embedding dropout.
        use_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias term on linear layers of the model.


    ```python
    >>> from transformers import Starcoder2Model, Starcoder2Config

    >>> # Initializing a Starcoder2 7B style configuration
    >>> configuration = Starcoder2Config()

    >>> # Initializing a model from the Starcoder2 7B style configuration
    >>> model = Starcoder2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "starcoder2"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `Starcoder2`
    # NOTE(review): c_proj is the MLP down-projection; after a "colwise" c_fc the usual
    # tensor-parallel layout for it would be "rowwise" — confirm "colwise" is intentional.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.c_fc": "colwise",
        "layers.*.mlp.c_proj": "colwise",
    }

    def __init__(
        self,
        vocab_size=49152,
        hidden_size=3072,
        intermediate_size=12288,
        num_hidden_layers=30,
        num_attention_heads=24,
        num_key_value_heads=2,
        hidden_act="gelu_pytorch_tanh",
        max_position_embeddings=4096,
        initializer_range=0.018042,
        norm_epsilon=1e-5,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        rope_theta=10000.0,
        rope_scaling=None,
        sliding_window=None,
        attention_dropout=0.0,
        residual_dropout=0.0,
        embedding_dropout=0.0,
        use_bias=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        self.use_bias = use_bias
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.norm_epsilon = norm_epsilon
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_dropout = attention_dropout
        self.residual_dropout = residual_dropout
        self.embedding_dropout = embedding_dropout
        # Validate the correctness of rotary position embeddings parameters
        # BC: if there is a 'type' field, move it to 'rope_type'.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)

        # Token ids are forwarded to the PretrainedConfig base, which also consumes **kwargs.
        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
200
+
201
+
202
+ __all__ = ["Starcoder2Config"]
vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/modeling_starcoder2.py ADDED
@@ -0,0 +1,1063 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/starcoder2/modular_starcoder2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_starcoder2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
11
+ # and OPT implementations in this library. It has been modified from its
12
+ # original forms to accommodate minor architectural differences compared
13
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
14
+ #
15
+ # Licensed under the Apache License, Version 2.0 (the "License");
16
+ # you may not use this file except in compliance with the License.
17
+ # You may obtain a copy of the License at
18
+ #
19
+ # http://www.apache.org/licenses/LICENSE-2.0
20
+ #
21
+ # Unless required by applicable law or agreed to in writing, software
22
+ # distributed under the License is distributed on an "AS IS" BASIS,
23
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24
+ # See the License for the specific language governing permissions and
25
+ # limitations under the License.
26
+
27
+ from typing import Callable, List, Optional, Tuple, Union
28
+
29
+ import torch
30
+ from torch import nn
31
+
32
+ from ...activations import ACT2FN
33
+ from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
34
+ from ...generation import GenerationMixin
35
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
36
+ from ...modeling_flash_attention_utils import FlashAttentionKwargs
37
+ from ...modeling_outputs import (
38
+ BaseModelOutputWithPast,
39
+ CausalLMOutputWithPast,
40
+ SequenceClassifierOutputWithPast,
41
+ TokenClassifierOutput,
42
+ )
43
+ from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
44
+ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
45
+ from ...processing_utils import Unpack
46
+ from ...utils import (
47
+ LossKwargs,
48
+ add_code_sample_docstrings,
49
+ add_start_docstrings,
50
+ add_start_docstrings_to_model_forward,
51
+ logging,
52
+ replace_return_docstrings,
53
+ )
54
+ from .configuration_starcoder2 import Starcoder2Config
55
+
56
+
57
+ logger = logging.get_logger(__name__)
58
+ _CHECKPOINT_FOR_DOC = "bigcode/starcoder2-7b"
59
+ _CONFIG_FOR_DOC = "Starcoder2Config"
60
+
61
+
62
+ class Starcoder2MLP(nn.Module):
63
+ def __init__(self, config: Starcoder2Config):
64
+ super().__init__()
65
+ embed_dim = config.hidden_size
66
+ self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
67
+ self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
68
+ self.act = ACT2FN[config.hidden_act]
69
+ self.residual_dropout = config.residual_dropout
70
+
71
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
72
+ hidden_states = self.c_fc(hidden_states)
73
+ hidden_states = self.act(hidden_states)
74
+ hidden_states = self.c_proj(hidden_states)
75
+ hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
76
+ return hidden_states
77
+
78
+
79
def rotate_half(x):
    """Rotate the last dimension by swapping its halves and negating the (new) first half."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
84
+
85
+
86
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos` and `sin` are unsqueezed so they broadcast against
            q/k. Use 1 for `[batch, heads, seq, head_dim]` layouts, 2 for
            `[batch, seq, heads, head_dim]` layouts.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    def _rotate(t):
        # Same transform as the module-level rotate_half: (-second_half, first_half).
        half = t.shape[-1] // 2
        return torch.cat((-t[..., half:], t[..., :half]), dim=-1)

    q_rotated = (q * cos) + (_rotate(q) * sin)
    k_rotated = (k * cos) + (_rotate(k) * sin)
    return q_rotated, k_rotated
111
+
112
+
113
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    if n_rep == 1:
        # Nothing to expand — hand back the input unchanged.
        return hidden_states
    bsz, kv_heads, seq_len, dim = hidden_states.shape
    # Insert a repeat axis, broadcast it (no copy), then flatten it into the head axis.
    expanded = hidden_states.unsqueeze(2).expand(bsz, kv_heads, n_rep, seq_len, dim)
    return expanded.reshape(bsz, kv_heads * n_rep, seq_len, dim)
123
+
124
+
125
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    """Reference (eager) scaled-dot-product attention with grouped-query K/V expansion.

    Returns a tuple of (attn_output, attn_weights) where attn_output is transposed back to
    (batch, seq, heads, head_dim) and attn_weights are the post-dropout probabilities.
    """
    # Expand grouped key/value heads so each query head has its own copy
    # (identical result to repeat_kv / torch.repeat_interleave on dim 1).
    n_rep = module.num_key_value_groups
    if n_rep > 1:
        b, kv_heads, k_len, dim = key.shape
        key = key[:, :, None].expand(b, kv_heads, n_rep, k_len, dim).reshape(b, kv_heads * n_rep, k_len, dim)
        b, kv_heads, v_len, dim = value.shape
        value = value[:, :, None].expand(b, kv_heads, n_rep, v_len, dim).reshape(b, kv_heads * n_rep, v_len, dim)

    scores = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # The mask may cover more positions than the current key length; crop before adding.
        scores = scores + attention_mask[:, :, :, : key.shape[-2]]

    # Softmax in float32 for numerical stability, then cast back to the query dtype.
    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)
    context = torch.matmul(probs, value)
    context = context.transpose(1, 2).contiguous()

    return context, probs
149
+
150
+
151
class Starcoder2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_heads when the config does not pin head_dim explicitly.
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # Number of query heads that share each key/value head (GQA); 1 means full MHA.
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
        self.residual_dropout = config.residual_dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # Project and reshape to (batch, num_heads, seq_len, head_dim).
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        # Apply rotary position embeddings computed by the model-level rotary module.
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Dispatch to the configured attention backend; eager is the reference implementation
        # and the forced fallback when SDPA is asked to return attention weights.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),  # diff with Llama
            **kwargs,
        )

        # Merge heads back into the model dimension, project, and apply residual dropout.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        attn_output = nn.functional.dropout(
            attn_output, p=self.residual_dropout, training=self.training
        )  # diff with Llama

        return attn_output, attn_weights
222
+
223
+
224
class Starcoder2DecoderLayer(nn.Module):
    """One transformer decoder block: pre-norm self-attention followed by a pre-norm MLP."""

    def __init__(self, config: Starcoder2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Starcoder2MLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        # Attention sub-block: normalize, attend, add back the residual.
        attn_input = self.input_layernorm(hidden_states)
        attn_output, self_attn_weights = self.self_attn(
            hidden_states=attn_input,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = hidden_states + attn_output

        # Feed-forward sub-block, again with a residual around the pre-norm.
        hidden_states = hidden_states + self.mlp(self.post_attention_layernorm(hidden_states))

        # Attention weights are only appended when explicitly requested.
        if output_attentions:
            return (hidden_states, self_attn_weights)
        return (hidden_states,)
274
+
275
+
276
class Starcoder2RotaryEmbedding(nn.Module):
    """Produces the (cos, sin) rotary-embedding tables for a batch of position ids."""

    def __init__(self, config: Starcoder2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        # Non-persistent buffer: inv_freq is recomputable from the config, so it is
        # excluded from checkpoints but still moves with the module across devices.
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Keep a pristine copy so dynamic RoPE can reset after long-sequence growth.
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            # This .to() is needed if the model has been moved to a device after being initialized (because
            # the buffer is automatically moved, but not the original copy)
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        # `x` is only used for its device and dtype; positions drive the computation.
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            # Outer product of inverse frequencies and positions, duplicated to full head_dim.
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
335
+
336
+
337
+ STARCODER2_START_DOCSTRING = r"""
338
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
339
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
340
+ etc.)
341
+
342
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
343
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
344
+ and behavior.
345
+
346
+ Parameters:
347
+ config ([`Starcoder2Config`]):
348
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
349
+ load the weights associated with the model, only the configuration. Check out the
350
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
351
+ """
352
+
353
+
354
@add_start_docstrings(
    "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.",
    STARCODER2_START_DOCSTRING,
)
class Starcoder2PreTrainedModel(PreTrainedModel):
    """Base class hooking Starcoder2 models into the shared PreTrainedModel machinery."""

    config_class = Starcoder2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Modules that must stay whole when sharding across devices.
    _no_split_modules = ["Starcoder2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    # Supported attention backends and cache implementations.
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights from Normal(0, config.initializer_range)."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero so padding tokens contribute nothing.
                module.weight.data[module.padding_idx].zero_()
381
+
382
+
383
# Shared `Args:` section injected into the model `forward` methods below via the
# `@add_start_docstrings_to_model_forward` decorator; documents the common decoder inputs.
STARCODER2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance, see our
            [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
            cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""
456
+
457
+
458
@add_start_docstrings(
    "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.",
    STARCODER2_START_DOCSTRING,
)
class Starcoder2Model(Starcoder2PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Starcoder2DecoderLayer`]

    Args:
        config: Starcoder2Config
    """

    def __init__(self, config: Starcoder2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        # Starcoder2 uses a standard LayerNorm on the final hidden states (see `config.norm_epsilon`).
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
        self.rotary_emb = Starcoder2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        # Dropout applied to the token embeddings before the first decoder layer.
        self.embedding_dropout = config.embedding_dropout

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """Run the decoder stack and return the final hidden states (see STARCODER2_INPUTS_DOCSTRING)."""
        # Fall back to config defaults for any output flag not explicitly provided.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of `input_ids` / `inputs_embeds` must be given (XOR check).
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        # Gradient checkpointing recomputes activations, which is incompatible with caching KV states.
        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            # Positions of the new tokens, offset by however many tokens the cache already holds.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds
        hidden_states = nn.functional.dropout(
            hidden_states, p=self.embedding_dropout, training=self.training
        )  # main diff with Llama

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        output = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        return output if return_dict else output.to_tuple()

    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        """Build (or bypass) the 4D causal mask appropriate for the active attention backend."""
        if self.config._attn_implementation == "flash_attention_2":
            # Flash Attention handles causality internally; it only needs the 2D padding mask
            # (or no mask at all), but right-padding breaks its batched-generation assumptions.
            if attention_mask is not None and past_key_values is not None:
                is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
                if is_padding_right:
                    raise ValueError(
                        "You are attempting to perform batched generation with padding_side='right'"
                        " this may lead to unexpected behaviour for Flash Attention version of Starcoder2. Make sure to "
                        " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                    )
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if (
            self.config._attn_implementation == "sdpa"
            and not (using_static_cache or using_sliding_window_cache)
            and not output_attentions
        ):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                sliding_window=self.config.sliding_window,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        # SlidingWindowCache or StaticCache
        if using_sliding_window_cache or using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        # DynamicCache or no cache
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
            config=self.config,
            past_key_values=past_key_values,
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        config: Starcoder2Config,
        past_key_values: Cache,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
            config (`Starcoder2Config`):
                The model's configuration class
            past_key_values (`Cache`):
                The cache class that is being used currently to generate
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            # Start fully masked (additive mask with the dtype's minimum), then unmask allowed positions.
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            # A key position is masked when it lies strictly in the future of the query position.
            diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            if config.sliding_window is not None:
                # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
                # the check is needed to verify is current checkpoint was trained with sliding window or not
                if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
                    sliding_attend_mask = torch.arange(target_length, device=device) <= (
                        cache_position.reshape(-1, 1) - config.sliding_window
                    )
                    diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
            causal_mask *= diagonal_attend_mask
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                if attention_mask.shape[-1] > target_length:
                    attention_mask = attention_mask[:, :target_length]
                mask_length = attention_mask.shape[-1]
                # Combine the causal mask with the 2D padding mask: positions where both are zero stay masked.
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )
        return causal_mask
739
+
740
+
741
# Typed-kwargs bundle accepted by `Starcoder2ForCausalLM.forward`: flash-attention
# keyword arguments plus loss-related keyword arguments, merged for `Unpack[...]`.
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
742
+
743
+
744
class Starcoder2ForCausalLM(Starcoder2PreTrainedModel, GenerationMixin):
    """Starcoder2 decoder with a language-modeling head on top, usable for generation."""

    # `lm_head.weight` may be tied to the input embeddings (when `config.tie_word_embeddings`).
    _tied_weights_keys = ["lm_head.weight"]
    # Tensor-parallel plan: the LM head is column-wise sharded with replicated output.
    _tp_plan = {"lm_head": "colwise_rep"}

    def __init__(self, config):
        super().__init__(config)
        self.model = Starcoder2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        num_logits_to_keep: int = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

            num_logits_to_keep (`int`, *optional*):
                Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
                `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
                token can save memory, which becomes pretty significant for long sequences or large vocabulary size.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Starcoder2ForCausalLM

        >>> model = Starcoder2ForCausalLM.from_pretrained("bigcode/starcoder2-7b")
        >>> tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder2-7b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss.
        # Note: when `num_logits_to_keep == 0`, the slice `[:, -0:, :]` covers the whole sequence.
        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
863
+
864
+
865
@add_start_docstrings(
    """
    The Starcoder2 Model transformer with a sequence classification head on top (linear layer).

    [`Starcoder2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    STARCODER2_START_DOCSTRING,
)
class Starcoder2ForSequenceClassification(Starcoder2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Starcoder2Model(config)
        # Classification head applied to the pooled (last non-pad token) hidden state.
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        # Without a pad token we cannot locate the last real token of each row, so batching is ambiguous.
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

        # Pool: pick the logits at the last non-pad position of each sequence.
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
967
+
968
+
969
@add_start_docstrings(
    """
    The Starcoder2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
    output) e.g. for Named-Entity-Recognition (NER) tasks.
    """,
    STARCODER2_START_DOCSTRING,
)
class Starcoder2ForTokenClassification(Starcoder2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Starcoder2Model(config)
        # Dropout probability precedence: `classifier_dropout` > `hidden_dropout` > 0.1 default.
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        # Per-token classification head.
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        if not return_dict:
            # Skip index 1 (`past_key_values`) in the tuple output, matching TokenClassifierOutput fields.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1055
+
1056
+
1057
# Public API of this module, consumed by transformers' lazy-import machinery.
__all__ = [
    "Starcoder2ForCausalLM",
    "Starcoder2Model",
    "Starcoder2PreTrainedModel",
    "Starcoder2ForSequenceClassification",
    "Starcoder2ForTokenClassification",
]
vlmpy310/lib/python3.10/site-packages/transformers/models/starcoder2/modular_starcoder2.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """PyTorch Starcoder2 model."""
21
+
22
+ from typing import Callable, List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+
28
+ from ...activations import ACT2FN
29
+ from ...cache_utils import Cache, DynamicCache
30
+ from ...modeling_flash_attention_utils import FlashAttentionKwargs
31
+ from ...modeling_outputs import (
32
+ BaseModelOutputWithPast,
33
+ )
34
+ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
35
+ from ...processing_utils import Unpack
36
+ from ...utils import add_start_docstrings_to_model_forward, logging
37
+ from ..mistral.modeling_mistral import (
38
+ MistralAttention,
39
+ MistralDecoderLayer,
40
+ MistralForCausalLM,
41
+ MistralForSequenceClassification,
42
+ MistralForTokenClassification,
43
+ MistralModel,
44
+ apply_rotary_pos_emb,
45
+ eager_attention_forward,
46
+ )
47
+ from .configuration_starcoder2 import Starcoder2Config
48
+
49
+
50
logger = logging.get_logger(__name__)

# Names used by transformers' docstring utilities when rendering code samples.
_CONFIG_FOR_DOC = "Starcoder2Config"
_CHECKPOINT_FOR_DOC = "bigcode/starcoder2-7b"
54
+
55
+
56
class Starcoder2MLP(nn.Module):
    """Starcoder2 feed-forward block: up-projection, activation, down-projection, residual dropout."""

    def __init__(self, config: Starcoder2Config):
        super().__init__()
        hidden_dim = config.hidden_size
        # `c_fc` / `c_proj` attribute names are kept to stay compatible with released checkpoints.
        self.c_fc = nn.Linear(hidden_dim, config.intermediate_size, bias=config.use_bias)
        self.c_proj = nn.Linear(config.intermediate_size, hidden_dim, bias=config.use_bias)
        self.act = ACT2FN[config.hidden_act]
        self.residual_dropout = config.residual_dropout

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        # Fused expression: down_proj(act(up_proj(x))), then dropout before the residual add.
        projected = self.c_proj(self.act(self.c_fc(hidden_states)))
        return nn.functional.dropout(projected, p=self.residual_dropout, training=self.training)
71
+
72
+
73
class Starcoder2Attention(MistralAttention):
    """Multi-headed attention for Starcoder2.

    Differences from `MistralAttention`: the q/k/v/o projections honour
    `config.use_bias`, and residual dropout is applied to the attention output.
    """

    def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None):
        # FIX: was `super().__init__()`. `MistralAttention.__init__` takes
        # (config, layer_idx); calling it without arguments raises a TypeError at
        # runtime and would leave `head_dim`, `scaling`, `attention_dropout` and
        # `layer_idx` unset — all of which this class relies on below.
        # NOTE(review): if this file is consumed only by the modular-conversion
        # tooling (which unrolls the parent call), confirm the converter accepts
        # the explicit arguments; at real runtime they are required.
        super().__init__(config, layer_idx)
        self.residual_dropout = config.residual_dropout
        # Re-create the projections so their bias follows `config.use_bias`.
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # Project, then reshape so heads become an explicit dimension:
        # (batch, seq, hidden) -> (batch, num_heads, seq, head_dim).
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Pick the attention backend; SDPA cannot return attention weights, so
        # fall back to eager when `output_attentions` is requested.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),  # diff with Llama
            **kwargs,
        )

        # Merge heads back: (batch, seq, num_heads * head_dim), then project out.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        # Residual dropout on the attention output — diff with Llama/Mistral.
        attn_output = nn.functional.dropout(
            attn_output, p=self.residual_dropout, training=self.training
        )

        return attn_output, attn_weights
135
+
136
+
137
class Starcoder2DecoderLayer(MistralDecoderLayer):
    """Starcoder2 decoder layer: Mistral layout with LayerNorm instead of RMSNorm."""

    def __init__(self, config: Starcoder2Config, layer_idx: int):
        # FIX: was `super().__init__(self)`, which passes the layer instance where
        # `MistralDecoderLayer.__init__` expects the config and omits `layer_idx`
        # entirely — a TypeError at real runtime. Pass the expected arguments.
        # NOTE(review): if the modular-conversion tooling unrolls this call,
        # confirm it accepts explicit arguments; at runtime they are required.
        super().__init__(config, layer_idx)
        self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Starcoder2MLP(config)
        # Starcoder2 uses nn.LayerNorm with `config.norm_epsilon` for both norms.
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
144
+
145
+
146
# Placeholder: the docstring is regenerated automatically for the final modeling file.
STARCODER2_INPUTS_DOCSTRING = None  # will be automatically redefined
147
+
148
+
149
class Starcoder2Model(MistralModel):
    """Starcoder2 decoder-only transformer backbone.

    Inherits the Mistral model skeleton but swaps in Starcoder2 decoder layers,
    overrides the final norm with ``nn.LayerNorm``, and applies dropout to the
    token embeddings before the decoder stack.
    """

    def __init__(self, config: Starcoder2Config):
        super().__init__(config)
        # Replace the parent's layers with Starcoder2 decoder layers.
        self.layers = nn.ModuleList(
            [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        # Final norm uses LayerNorm with `norm_epsilon` (overrides the parent's norm).
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
        self.embedding_dropout = config.embedding_dropout

    @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        # Resolve per-call flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        # Caching and gradient checkpointing are mutually exclusive.
        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # Lazily create a KV cache when caching is requested but none was passed.
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            # Positions of the new tokens, offset by what is already in the cache.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds
        # Dropout on the embeddings — main diff with Llama.
        hidden_states = nn.functional.dropout(
            hidden_states, p=self.embedding_dropout, training=self.training
        )  # main diff with Llama

        # Create position embeddings to be shared across the decoder layers.
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # Decoder layers; optionally collect per-layer hidden states / attentions.
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # Add hidden states from the last decoder layer (post final norm).
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        output = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        return output if return_dict else output.to_tuple()
254
+
255
+
256
class Starcoder2ForCausalLM(MistralForCausalLM):
    """Starcoder2 with a causal language-modeling head; behavior inherited unchanged."""
258
+
259
+
260
class Starcoder2ForSequenceClassification(MistralForSequenceClassification):
    """Starcoder2 with a sequence-classification head; behavior inherited unchanged."""
262
+
263
+
264
class Starcoder2ForTokenClassification(MistralForTokenClassification):
    """Starcoder2 with a token-classification head; behavior inherited unchanged."""
266
+
267
+
268
# Public API of this module. `Starcoder2PreTrainedModel` is emitted by the
# modular-conversion step rather than defined in this file, hence the F822
# (undefined name in __all__) suppression.
__all__ = [
    "Starcoder2ForCausalLM",
    "Starcoder2Model",
    "Starcoder2PreTrainedModel",  # noqa: F822
    "Starcoder2ForSequenceClassification",
    "Starcoder2ForTokenClassification",
]