ZTWHHH commited on
Commit
68d875b
·
verified ·
1 Parent(s): 0421b54

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. janus/lib/python3.10/site-packages/transformers/models/canine/__init__.py +28 -0
  3. janus/lib/python3.10/site-packages/transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc +0 -0
  4. janus/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc +0 -0
  5. janus/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc +0 -0
  6. janus/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc +0 -0
  7. janus/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py +230 -0
  8. janus/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py +814 -0
  9. janus/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py +419 -0
  10. janus/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py +265 -0
  11. janus/lib/python3.10/site-packages/transformers/models/cohere2/__init__.py +27 -0
  12. janus/lib/python3.10/site-packages/transformers/models/cohere2/__pycache__/modeling_cohere2.cpython-310.pyc +0 -0
  13. janus/lib/python3.10/site-packages/transformers/models/cohere2/__pycache__/modular_cohere2.cpython-310.pyc +0 -0
  14. janus/lib/python3.10/site-packages/transformers/models/cohere2/configuration_cohere2.py +209 -0
  15. janus/lib/python3.10/site-packages/transformers/models/cohere2/modeling_cohere2.py +948 -0
  16. janus/lib/python3.10/site-packages/transformers/models/cohere2/modular_cohere2.py +618 -0
  17. janus/lib/python3.10/site-packages/transformers/models/cpm/__init__.py +27 -0
  18. janus/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc +0 -0
  19. janus/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc +0 -0
  20. janus/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc +0 -0
  21. janus/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py +348 -0
  22. janus/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py +241 -0
  23. janus/lib/python3.10/site-packages/transformers/models/deit/__pycache__/image_processing_deit.cpython-310.pyc +0 -0
  24. janus/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_tf_deit.cpython-310.pyc +0 -0
  25. janus/lib/python3.10/site-packages/transformers/models/deit/image_processing_deit.py +299 -0
  26. janus/lib/python3.10/site-packages/transformers/models/deit/modeling_deit.py +1021 -0
  27. janus/lib/python3.10/site-packages/transformers/models/dpr/__init__.py +30 -0
  28. janus/lib/python3.10/site-packages/transformers/models/longt5/__init__.py +28 -0
  29. janus/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc +0 -0
  30. janus/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc +0 -0
  31. janus/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc +0 -0
  32. janus/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc +0 -0
  33. janus/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py +180 -0
  34. janus/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py +0 -0
  35. janus/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py +0 -0
  36. janus/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc +0 -0
  37. janus/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py +922 -0
  38. janus/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/__init__.cpython-310.pyc +0 -0
  39. janus/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/modeling_mobilevitv2.cpython-310.pyc +0 -0
  40. janus/lib/python3.10/site-packages/transformers/models/mobilevitv2/modeling_mobilevitv2.py +1035 -0
  41. janus/lib/python3.10/site-packages/transformers/models/mt5/__init__.py +30 -0
  42. janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/__init__.cpython-310.pyc +0 -0
  43. janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/configuration_mt5.cpython-310.pyc +0 -0
  44. janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_flax_mt5.cpython-310.pyc +0 -0
  45. janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_mt5.cpython-310.pyc +0 -0
  46. janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_tf_mt5.cpython-310.pyc +0 -0
  47. janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5.cpython-310.pyc +0 -0
  48. janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5_fast.cpython-310.pyc +0 -0
  49. janus/lib/python3.10/site-packages/transformers/models/mt5/configuration_mt5.py +182 -0
  50. janus/lib/python3.10/site-packages/transformers/models/mt5/modeling_flax_mt5.py +123 -0
.gitattributes CHANGED
@@ -440,3 +440,4 @@ deepseek/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu
440
  janus/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
441
  janus/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
442
  janus/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
440
  janus/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
441
  janus/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
442
  janus/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
443
+ janus/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
janus/lib/python3.10/site-packages/transformers/models/canine/__init__.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_canine import *
22
+ from .modeling_canine import *
23
+ from .tokenization_canine import *
24
+ else:
25
+ import sys
26
+
27
+ _file = globals()["__file__"]
28
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/canine/__pycache__/tokenization_canine.cpython-310.pyc ADDED
Binary file (7.82 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc ADDED
Binary file (25.1 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc ADDED
Binary file (9.79 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """CodeGen model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Any, List, Mapping, Optional
19
+
20
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class CodeGenConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`CodeGenModel`]. It is used to instantiate a
32
+ CodeGen model according to the specified arguments, defining the model architecture. Instantiating a configuration
33
+ with the defaults will yield a similar configuration to that of the CodeGen
34
+ [Salesforce/codegen-2B-mono](https://huggingface.co/Salesforce/codegen-2B-mono) architecture. Configuration objects
35
+ inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
36
+ [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 50400):
40
+ Vocabulary size of the CodeGen model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`CodeGenModel`].
42
+ n_positions (`int`, *optional*, defaults to 2048):
43
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
44
+ just in case (e.g., 512 or 1024 or 2048).
45
+ n_ctx (`int`, *optional*, defaults to 2048):
46
+ This attribute is used in `CodeGenModel.__init__` without any real effect.
47
+ n_embd (`int`, *optional*, defaults to 4096):
48
+ Dimensionality of the embeddings and hidden states.
49
+ n_layer (`int`, *optional*, defaults to 28):
50
+ Number of hidden layers in the Transformer encoder.
51
+ n_head (`int`, *optional*, defaults to 16):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ rotary_dim (`int`, *optional*, defaults to 64):
54
+ Number of dimensions in the embedding that Rotary Position Embedding is applied to.
55
+ n_inner (`int`, *optional*):
56
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
57
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
58
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
59
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
60
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
61
+ embd_pdrop (`int`, *optional*, defaults to 0.0):
62
+ The dropout ratio for the embeddings.
63
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio for the attention.
65
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
66
+ The epsilon to use in the layer normalization layers.
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ use_cache (`bool`, *optional*, defaults to `True`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models).
71
+ bos_token_id (`int`, *optional*, defaults to 50256):
72
+ Beginning of stream token id.
73
+ eos_token_id (`int`, *optional*, defaults to 50256):
74
+ End of stream token id.
75
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
76
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
77
+ model has a output word embedding layer.
78
+
79
+ Example:
80
+
81
+ ```python
82
+ >>> from transformers import CodeGenConfig, CodeGenModel
83
+
84
+ >>> # Initializing a CodeGen 6B configuration
85
+ >>> configuration = CodeGenConfig()
86
+
87
+ >>> # Initializing a model (with random weights) from the configuration
88
+ >>> model = CodeGenModel(configuration)
89
+
90
+ >>> # Accessing the model configuration
91
+ >>> configuration = model.config
92
+ ```"""
93
+
94
+ model_type = "codegen"
95
+ attribute_map = {
96
+ "max_position_embeddings": "n_positions",
97
+ "hidden_size": "n_embd",
98
+ "num_attention_heads": "n_head",
99
+ "num_hidden_layers": "n_layer",
100
+ }
101
+
102
+ def __init__(
103
+ self,
104
+ vocab_size=50400,
105
+ n_positions=2048,
106
+ n_ctx=2048,
107
+ n_embd=4096,
108
+ n_layer=28,
109
+ n_head=16,
110
+ rotary_dim=64,
111
+ n_inner=None,
112
+ activation_function="gelu_new",
113
+ resid_pdrop=0.0,
114
+ embd_pdrop=0.0,
115
+ attn_pdrop=0.0,
116
+ layer_norm_epsilon=1e-5,
117
+ initializer_range=0.02,
118
+ use_cache=True,
119
+ bos_token_id=50256,
120
+ eos_token_id=50256,
121
+ tie_word_embeddings=False,
122
+ **kwargs,
123
+ ):
124
+ self.vocab_size = vocab_size
125
+ self.n_ctx = n_ctx
126
+ self.n_positions = n_positions
127
+ self.n_embd = n_embd
128
+ self.n_layer = n_layer
129
+ self.n_head = n_head
130
+ self.n_inner = n_inner
131
+ self.rotary_dim = rotary_dim
132
+ self.activation_function = activation_function
133
+ self.resid_pdrop = resid_pdrop
134
+ self.embd_pdrop = embd_pdrop
135
+ self.attn_pdrop = attn_pdrop
136
+ self.layer_norm_epsilon = layer_norm_epsilon
137
+ self.initializer_range = initializer_range
138
+ self.use_cache = use_cache
139
+
140
+ self.bos_token_id = bos_token_id
141
+ self.eos_token_id = eos_token_id
142
+
143
+ super().__init__(
144
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
145
+ )
146
+
147
+
148
+ # Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
149
+ class CodeGenOnnxConfig(OnnxConfigWithPast):
150
+ def __init__(
151
+ self,
152
+ config: PretrainedConfig,
153
+ task: str = "default",
154
+ patching_specs: List[PatchingSpec] = None,
155
+ use_past: bool = False,
156
+ ):
157
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
158
+ if not getattr(self._config, "pad_token_id", None):
159
+ # TODO: how to do that better?
160
+ self._config.pad_token_id = 0
161
+
162
+ @property
163
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
164
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
165
+ if self.use_past:
166
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
167
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
168
+ else:
169
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
170
+
171
+ return common_inputs
172
+
173
+ @property
174
+ def num_layers(self) -> int:
175
+ return self._config.n_layer
176
+
177
+ @property
178
+ def num_attention_heads(self) -> int:
179
+ return self._config.n_head
180
+
181
+ def generate_dummy_inputs(
182
+ self,
183
+ tokenizer: PreTrainedTokenizer,
184
+ batch_size: int = -1,
185
+ seq_length: int = -1,
186
+ is_pair: bool = False,
187
+ framework: Optional[TensorType] = None,
188
+ ) -> Mapping[str, Any]:
189
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
190
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
191
+ )
192
+
193
+ # We need to order the input in the way they appears in the forward()
194
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
195
+
196
+ # Need to add the past_keys
197
+ if self.use_past:
198
+ if not is_torch_available():
199
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
200
+ else:
201
+ import torch
202
+
203
+ batch, seqlen = common_inputs["input_ids"].shape
204
+ # Not using the same length for past_key_values
205
+ past_key_values_length = seqlen + 2
206
+ past_shape = (
207
+ batch,
208
+ self.num_attention_heads,
209
+ past_key_values_length,
210
+ self._config.hidden_size // self.num_attention_heads,
211
+ )
212
+ ordered_inputs["past_key_values"] = [
213
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
214
+ ]
215
+
216
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
217
+ if self.use_past:
218
+ mask_dtype = ordered_inputs["attention_mask"].dtype
219
+ ordered_inputs["attention_mask"] = torch.cat(
220
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
221
+ )
222
+
223
+ return ordered_inputs
224
+
225
+ @property
226
+ def default_onnx_opset(self) -> int:
227
+ return 13
228
+
229
+
230
+ __all__ = ["CodeGenConfig", "CodeGenOnnxConfig"]
janus/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py ADDED
@@ -0,0 +1,814 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch CodeGen model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.nn import CrossEntropyLoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...cache_utils import Cache, DynamicCache, StaticCache
26
+ from ...generation import GenerationMixin
27
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
28
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
31
+ from .configuration_codegen import CodeGenConfig
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ _CHECKPOINT_FOR_DOC = "Salesforce/codegen-2B-mono"
37
+ _CONFIG_FOR_DOC = "CodeGenConfig"
38
+
39
+
40
+ # Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions
41
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
42
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
43
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
44
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
45
+
46
+
47
+ # Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
48
+ def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
49
+ x1 = x[:, :, :, ::2]
50
+ x2 = x[:, :, :, 1::2]
51
+ x = torch.stack((-x2, x1), dim=-1)
52
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
53
+
54
+
55
+ # Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
56
+ def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
57
+ sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
58
+ cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
59
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
60
+
61
+
62
+ class CodeGenAttention(nn.Module):
63
+ def __init__(self, config, layer_idx=None):
64
+ super().__init__()
65
+
66
+ max_positions = config.max_position_embeddings
67
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
68
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
69
+ self.layer_idx = layer_idx
70
+ if layer_idx is None:
71
+ logger.warning_once(
72
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
73
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
74
+ "when creating this class."
75
+ )
76
+
77
+ self.embed_dim = config.hidden_size
78
+ self.num_attention_heads = config.num_attention_heads
79
+ self.head_dim = self.embed_dim // self.num_attention_heads
80
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
81
+ raise ValueError(
82
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
83
+ f" `num_attention_heads`: {self.num_attention_heads})."
84
+ )
85
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
86
+ self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
87
+
88
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
89
+ self.rotary_dim = config.rotary_dim
90
+ pos_embd_dim = self.rotary_dim or self.embed_dim
91
+ self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
92
+
93
+ def _split_heads(self, x, n_head, dim_head, mp_num):
94
+ reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
95
+ reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
96
+ return reshaped
97
+
98
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
99
+ """
100
+ Merges attn_head_size dim and num_attn_heads dim into n_ctx
101
+ """
102
+ if len(tensor.shape) == 5:
103
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
104
+ elif len(tensor.shape) == 4:
105
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
106
+ else:
107
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
108
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
109
+ return tensor.view(new_shape)
110
+
111
+ def _attn(
112
+ self,
113
+ query,
114
+ key,
115
+ value,
116
+ attention_mask=None,
117
+ head_mask=None,
118
+ ):
119
+ # Keep the attention weights computation in fp32 to avoid overflow issues
120
+ query = query.to(torch.float32)
121
+ key = key.to(torch.float32)
122
+
123
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
124
+
125
+ if attention_mask is not None:
126
+ causal_mask = attention_mask[:, :, :, : key.shape[-2]]
127
+ attn_weights += causal_mask
128
+
129
+ attn_weights = attn_weights / self.scale_attn
130
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
131
+ attn_weights = attn_weights.to(value.dtype)
132
+ attn_weights = self.attn_dropout(attn_weights)
133
+
134
+ # Mask heads if we want to
135
+ if head_mask is not None:
136
+ attn_weights = attn_weights * head_mask
137
+
138
+ attn_output = torch.matmul(attn_weights, value)
139
+
140
+ return attn_output, attn_weights
141
+
142
+ def forward(
143
+ self,
144
+ hidden_states: Optional[torch.FloatTensor],
145
+ layer_past: Optional[Cache] = None,
146
+ attention_mask: Optional[torch.FloatTensor] = None,
147
+ position_ids: Optional[torch.LongTensor] = None,
148
+ head_mask: Optional[torch.FloatTensor] = None,
149
+ use_cache: Optional[bool] = False,
150
+ output_attentions: Optional[bool] = False,
151
+ cache_position: Optional[torch.LongTensor] = None,
152
+ ) -> Union[
153
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
154
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
155
+ ]:
156
+ qkv = self.qkv_proj(hidden_states)
157
+ # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
158
+ mp_num = 4
159
+ qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
160
+
161
+ local_dim = self.head_dim * self.num_attention_heads // mp_num
162
+ query, value, key = torch.split(qkv_split, local_dim, dim=-1)
163
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
164
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
165
+
166
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
167
+ value = value.permute(0, 2, 1, 3)
168
+
169
+ embed_positions = self.embed_positions
170
+ if embed_positions.device != position_ids.device:
171
+ embed_positions = embed_positions.to(position_ids.device)
172
+ self.embed_positions = embed_positions
173
+
174
+ sincos = embed_positions[position_ids]
175
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
176
+
177
+ if self.rotary_dim is not None:
178
+ k_rot = key[:, :, :, : self.rotary_dim]
179
+ k_pass = key[:, :, :, self.rotary_dim :]
180
+
181
+ q_rot = query[:, :, :, : self.rotary_dim]
182
+ q_pass = query[:, :, :, self.rotary_dim :]
183
+
184
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
185
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
186
+
187
+ key = torch.cat([k_rot, k_pass], dim=-1)
188
+ query = torch.cat([q_rot, q_pass], dim=-1)
189
+ else:
190
+ key = apply_rotary_pos_emb(key, sin, cos)
191
+ query = apply_rotary_pos_emb(query, sin, cos)
192
+
193
+ key = key.permute(0, 2, 1, 3)
194
+ query = query.permute(0, 2, 1, 3)
195
+
196
+ # Note that this cast is quite ugly, but is not implemented before ROPE as k_rot in the original codebase is always in fp32.
197
+ # Reference: https://github.com/salesforce/CodeGen/blob/f210c3bb1216c975ad858cd4132c0fdeabf4bfc2/codegen1/jaxformer/hf/codegen/modeling_codegen.py#L38
198
+ if layer_past is not None:
199
+ cache_kwargs = {
200
+ "sin": sin,
201
+ "cos": cos,
202
+ "partial_rotation_size": self.rotary_dim,
203
+ "cache_position": cache_position,
204
+ }
205
+ key, value = layer_past.update(key.to(hidden_states.dtype), value, self.layer_idx, cache_kwargs)
206
+
207
+ # compute self-attention: V x Softmax(QK^T)
208
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
209
+
210
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
211
+ attn_output = self.out_proj(attn_output)
212
+ attn_output = self.resid_dropout(attn_output)
213
+
214
+ outputs = (attn_output, layer_past)
215
+ if output_attentions:
216
+ outputs += (attn_weights,)
217
+
218
+ return outputs # a, present, (attentions)
219
+
220
+
221
+ # Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->CodeGen
222
+ class CodeGenMLP(nn.Module):
223
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
224
+ super().__init__()
225
+ embed_dim = config.n_embd
226
+
227
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
228
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
229
+
230
+ self.act = ACT2FN[config.activation_function]
231
+ self.dropout = nn.Dropout(config.resid_pdrop)
232
+
233
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
234
+ hidden_states = self.fc_in(hidden_states)
235
+ hidden_states = self.act(hidden_states)
236
+ hidden_states = self.fc_out(hidden_states)
237
+ hidden_states = self.dropout(hidden_states)
238
+ return hidden_states
239
+
240
+
241
+ # Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->CodeGen
242
+ class CodeGenBlock(nn.Module):
243
+ # Ignore copy
244
+ def __init__(self, config, layer_idx=None):
245
+ super().__init__()
246
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
247
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
248
+ self.attn = CodeGenAttention(config, layer_idx)
249
+ self.mlp = CodeGenMLP(inner_dim, config)
250
+
251
+ def forward(
252
+ self,
253
+ hidden_states: Optional[torch.FloatTensor],
254
+ layer_past: Optional[Cache] = None,
255
+ attention_mask: Optional[torch.FloatTensor] = None,
256
+ position_ids: Optional[torch.LongTensor] = None,
257
+ head_mask: Optional[torch.FloatTensor] = None,
258
+ use_cache: Optional[bool] = False,
259
+ output_attentions: Optional[bool] = False,
260
+ cache_position: Optional[torch.LongTensor] = None,
261
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
262
+ residual = hidden_states
263
+ hidden_states = self.ln_1(hidden_states)
264
+ attn_outputs = self.attn(
265
+ hidden_states=hidden_states,
266
+ layer_past=layer_past,
267
+ attention_mask=attention_mask,
268
+ position_ids=position_ids,
269
+ head_mask=head_mask,
270
+ use_cache=use_cache,
271
+ output_attentions=output_attentions,
272
+ cache_position=cache_position,
273
+ )
274
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
275
+ outputs = attn_outputs[1:]
276
+
277
+ feed_forward_hidden_states = self.mlp(hidden_states)
278
+ hidden_states = attn_output + feed_forward_hidden_states + residual
279
+
280
+ if use_cache:
281
+ outputs = (hidden_states,) + outputs
282
+ else:
283
+ outputs = (hidden_states,) + outputs[1:]
284
+
285
+ return outputs # hidden_states, present, (attentions)
286
+
287
+
288
class CodeGenPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CodeGenConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    # Keep a whole `CodeGenBlock` on one device when the model is sharded across devices.
    _no_split_modules = ["CodeGenBlock"]
    _skip_keys_device_placement = "past_key_values"
    # Cache capabilities advertised to the generation machinery.
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def __init__(self, *inputs, **kwargs):
        # Pure pass-through to `PreTrainedModel.__init__`.
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear,)):
            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # The padding embedding row stays zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as the identity affine transform: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
321
+
322
+
323
# Docstring fragments injected into the model classes by the `add_start_docstrings*` decorators.
CODEGEN_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`CodeGenConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CODEGEN_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance, see our
              [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
              shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
              cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""
404
+
405
+
406
@add_start_docstrings(
    "The bare CodeGen Model transformer outputting raw hidden-states without any specific head on top.",
    CODEGEN_START_DOCSTRING,
)
class CodeGenModel(CodeGenPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        # Token embedding table; `forward` also reuses it to embed `token_type_ids`.
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([CodeGenBlock(config, layer_idx=i) for i in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # The token embedding module.
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        # Resolve per-call flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        # kept for BC (non `Cache` `past_key_values` inputs)
        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        # Default `cache_position`: positions continuing after whatever is already cached.
        seq_length = inputs_embeds.shape[1]
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device)

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape n_layer x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        hidden_states = inputs_embeds

        if token_type_ids is not None:
            # Token-type embeddings share the word embedding table (`wte`).
            token_type_ids = token_type_ids.view(-1, seq_length)
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)
        output_shape = (-1, seq_length, hidden_states.size(-1))

        next_decoder_cache = None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, block in enumerate(self.h):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # `layer_past` is passed as None here (positional arg after hidden_states):
                # `use_cache` has already been forced to False above during checkpointed training.
                outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    None,
                    causal_mask,
                    position_ids,
                    head_mask[i],
                    use_cache,
                    output_attentions,
                    cache_position,
                )
            else:
                outputs = block(
                    hidden_states=hidden_states,
                    layer_past=past_key_values,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    cache_position=cache_position,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                next_decoder_cache = outputs[1]

            if output_attentions:
                # Attention weights sit after `present` when a cache is returned.
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            # Mirror the input format: caller gave a legacy tuple cache, return one.
            next_cache = next_cache.to_legacy_cache()

        if not return_dict:
            return tuple(
                v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None
            )

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )

    # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            # Flash Attention handles causality internally; only pass a mask when there is real padding.
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            # Start fully masked (dtype-min everywhere), then open positions a query may attend to.
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            # Keep cache slots beyond the current positions masked.
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                # A position is padding iff both the causal mask (0 = visible) and the 2D mask agree it is blocked.
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
698
+
699
+
700
@add_start_docstrings(
    """
    The CodeGen Model transformer with a language modeling head on top.
    """,
    CODEGEN_START_DOCSTRING,
)
class CodeGenForCausalLM(CodeGenPreTrainedModel, GenerationMixin):
    # `lm_head.weight` may be tied to the input embeddings by the base-class tying logic.
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = CodeGenModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        hidden_states = transformer_outputs[0]

        # make sure sampling in fp16 works correctly and
        # compute loss in fp32 to match with mesh-tf version
        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
        lm_logits = self.lm_head(hidden_states).to(torch.float32)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            # Cast the fp32 loss back to the model's working dtype.
            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        # Legacy tuple-cache path: re-index the batch dimension of every cached tensor.
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )
812
+
813
+
814
# Public re-exports for `from ... import *` and the lazy module loader.
__all__ = ["CodeGenForCausalLM", "CodeGenModel", "CodeGenPreTrainedModel"]
janus/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py ADDED
@@ -0,0 +1,419 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CodeGen"""
16
+
17
+ import json
18
+ import os
19
+ from functools import lru_cache
20
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import regex as re
24
+
25
+ from ...utils import is_tf_available, is_torch_available, logging, to_py_obj
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ if is_torch_available():
30
+ import torch
31
+ if is_tf_available():
32
+ import tensorflow as tf
33
+
34
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
35
+
36
+
37
logger = logging.get_logger(__name__)

# On-disk file names that make up a saved CodeGen tokenizer vocabulary.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
43
+
44
+
45
@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
    characters the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    # Printable bytes map to themselves; the three ranges below are exactly the
    # visible Latin-1 characters (no whitespace/control characters).
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping = {byte: chr(byte) for byte in printable}

    # Every remaining byte is assigned a fresh codepoint starting at 256, so the
    # full 0..255 range maps to 256 distinct, printable unicode characters.
    next_offset = 0
    for byte in range(2**8):
        if byte not in mapping:
            mapping[byte] = chr(2**8 + next_offset)
            next_offset += 1
    return mapping
68
+
69
+
70
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    # `word[0]` is read first, so an empty word raises IndexError exactly as before.
    left = word[0]
    pairs = set()
    for right in word[1:]:
        pairs.add((left, right))
        left = right
    return pairs
82
+
83
+
84
+ class CodeGenTokenizer(PreTrainedTokenizer):
85
+ """
86
+ Construct a CodeGen tokenizer. Based on byte-level Byte-Pair-Encoding.
87
+
88
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
89
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
90
+
91
+ ```python
92
+ >>> from transformers import CodeGenTokenizer
93
+
94
+ >>> tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
95
+ >>> tokenizer("Hello world")["input_ids"]
96
+ [15496, 995]
97
+
98
+ >>> tokenizer(" Hello world")["input_ids"]
99
+ [18435, 995]
100
+ ```
101
+
102
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
103
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
104
+
105
+ <Tip>
106
+
107
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
108
+
109
+ </Tip>
110
+
111
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
112
+ this superclass for more information regarding those methods.
113
+
114
+ Args:
115
+ vocab_file (`str`):
116
+ Path to the vocabulary file.
117
+ merges_file (`str`):
118
+ Path to the merges file.
119
+ errors (`str`, *optional*, defaults to `"replace"`):
120
+ Paradigm to follow when decoding bytes to UTF-8. See
121
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
122
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
123
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
124
+ token instead.
125
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
126
+ The beginning of sequence token.
127
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
128
+ The end of sequence token.
129
+ pad_token (`str`, *optional*):
130
+ The token used for padding, for example when batching sequences of different lengths.
131
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
132
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
133
+ other word. (CodeGen tokenizer detect beginning of words by the preceding space).
134
+ add_bos_token (`bool`, *optional*, defaults to `False`):
135
+ Whether to add a beginning of sequence token at the start of sequences.
136
+ return_token_type_ids (`bool`, *optional*, defaults to `False`):
137
+ Whether to return token type IDs.
138
+ """
139
+
140
+ vocab_files_names = VOCAB_FILES_NAMES
141
+ model_input_names = ["input_ids", "attention_mask"]
142
+
143
+ def __init__(
144
+ self,
145
+ vocab_file,
146
+ merges_file,
147
+ errors="replace",
148
+ unk_token="<|endoftext|>",
149
+ bos_token="<|endoftext|>",
150
+ eos_token="<|endoftext|>",
151
+ pad_token=None,
152
+ add_prefix_space=False,
153
+ add_bos_token=False,
154
+ return_token_type_ids=False,
155
+ **kwargs,
156
+ ):
157
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
158
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
159
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
160
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
161
+ self.add_bos_token = add_bos_token
162
+ self.return_token_type_ids = return_token_type_ids
163
+ if self.return_token_type_ids:
164
+ self.model_input_names.append("token_type_ids")
165
+
166
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
167
+ self.encoder = json.load(vocab_handle)
168
+ self.decoder = {v: k for k, v in self.encoder.items()}
169
+ self.errors = errors # how to handle errors in decoding
170
+ self.byte_encoder = bytes_to_unicode()
171
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
172
+ with open(merges_file, encoding="utf-8") as merges_handle:
173
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
174
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
175
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
176
+ self.cache = {}
177
+ self.add_prefix_space = add_prefix_space
178
+
179
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
180
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
181
+ super().__init__(
182
+ errors=errors,
183
+ unk_token=unk_token,
184
+ bos_token=bos_token,
185
+ eos_token=eos_token,
186
+ pad_token=pad_token,
187
+ add_prefix_space=add_prefix_space,
188
+ add_bos_token=add_bos_token,
189
+ return_token_type_ids=return_token_type_ids,
190
+ **kwargs,
191
+ )
192
+
193
    @property
    def vocab_size(self):
        # Size of the base vocabulary (added tokens are tracked separately).
        return len(self.encoder)
196
+
197
    def get_vocab(self):
        # Base vocab merged with tokens added after loading; added tokens win on key clashes.
        return dict(self.encoder, **self.added_tokens_encoder)
199
+
200
    def bpe(self, token):
        """Apply byte-pair merges to `token` and return the space-joined BPE symbols (memoized in `self.cache`)."""
        if token in self.cache:
            return self.cache[token]
        # Start from individual characters and repeatedly merge the best-ranked adjacent pair.
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            # Single-character token: nothing to merge.
            return token

        while True:
            # Lowest rank = highest merge priority; unknown pairs rank as +inf.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                # No mergeable pair left.
                break
            first, second = bigram
            new_word = []
            i = 0
            # Rebuild the word, fusing every (first, second) occurrence into one symbol.
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
241
+
242
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
243
+ if self.add_bos_token:
244
+ bos_token_ids = [self.bos_token_id]
245
+ else:
246
+ bos_token_ids = []
247
+
248
+ output = bos_token_ids + token_ids_0
249
+
250
+ if token_ids_1 is None:
251
+ return output
252
+
253
+ return output + bos_token_ids + token_ids_1
254
+
255
+ def _tokenize(self, text):
256
+ """Tokenize a string."""
257
+ bpe_tokens = []
258
+ for token in re.findall(self.pat, text):
259
+ token = "".join(
260
+ self.byte_encoder[b] for b in token.encode("utf-8")
261
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
262
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
263
+ return bpe_tokens
264
+
265
+ def _convert_token_to_id(self, token):
266
+ """Converts a token (str) in an id using the vocab."""
267
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
268
+
269
+ def _convert_id_to_token(self, index):
270
+ """Converts an index (integer) in a token (str) using the vocab."""
271
+ return self.decoder.get(index)
272
+
273
+ def convert_tokens_to_string(self, tokens):
274
+ """Converts a sequence of tokens (string) in a single string."""
275
+ text = "".join(tokens)
276
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
277
+ return text
278
+
279
+ def create_token_type_ids_from_sequences(
280
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
281
+ ) -> List[int]:
282
+ """
283
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A sequence
284
+ pair mask has the following format:
285
+
286
+ ```
287
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
288
+ | first sequence | second sequence |
289
+ ```
290
+
291
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
292
+
293
+ Args:
294
+ token_ids_0 (`List[int]`):
295
+ List of IDs.
296
+ token_ids_1 (`List[int]`, *optional*):
297
+ Optional second list of IDs for sequence pairs.
298
+
299
+ Returns:
300
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
301
+ """
302
+ sep = [self.sep_token_id] if self.sep_token_id is not None else []
303
+ cls = [self.cls_token_id] if self.sep_token_id is not None else []
304
+ if token_ids_1 is None:
305
+ return len(cls + token_ids_0 + sep) * [0]
306
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
307
+
308
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary and BPE merge files into `save_directory`.

        Produces `vocab.json` (token -> id mapping) and `merges.txt` (one merge
        per line, ordered by rank). Returns the two file paths; when
        `save_directory` is not an existing directory an error is logged and
        `None` is returned instead of raising.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must come out in rank order; a gap between the running
            # `index` and the stored rank signals a corrupted `bpe_ranks` table.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
336
+
337
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
338
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
339
+ if is_split_into_words or add_prefix_space:
340
+ text = " " + text
341
+ return (text, kwargs)
342
+
343
    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
            truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
                A list of regular expression strings that will be used to truncate the returned string. This can be
                used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
                of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str`: The decoded sentence.
        """

        # Normalize tensors/arrays to plain Python ints before decoding.
        token_ids = to_py_obj(token_ids)

        decoded_text = super()._decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        # CodeGen-specific post-processing: cut the completion at the earliest
        # occurrence of any of the given patterns (see `truncate`).
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text
389
+
390
+ def truncate(self, completion, truncate_before_pattern):
391
+ def find_re(string, pattern, start_pos):
392
+ m = pattern.search(string, start_pos)
393
+ return m.start() if m else -1
394
+
395
+ terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
396
+
397
+ prints = list(re.finditer("^print", completion, re.MULTILINE))
398
+
399
+ if len(prints) > 1:
400
+ completion = completion[: prints[1].start()]
401
+
402
+ defs = list(re.finditer("^def", completion, re.MULTILINE))
403
+
404
+ if len(defs) > 1:
405
+ completion = completion[: defs[1].start()]
406
+
407
+ start_pos = 0
408
+
409
+ terminals_pos = [
410
+ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
411
+ ]
412
+
413
+ if len(terminals_pos) > 0:
414
+ return completion[: min(terminals_pos)]
415
+ else:
416
+ return completion
417
+
418
+
419
+ __all__ = ["CodeGenTokenizer"]
janus/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for OpenAI GPT."""
16
+
17
+ import re
18
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...utils import is_tf_available, is_torch_available, logging
23
+
24
+
25
+ if TYPE_CHECKING:
26
+ if is_torch_available():
27
+ import torch
28
+ if is_tf_available():
29
+ import tensorflow as tf
30
+
31
+
32
+ from ...tokenization_utils_base import BatchEncoding
33
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
34
+ from .tokenization_codegen import CodeGenTokenizer
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
40
+
41
+
42
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
    Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import CodeGenTokenizerFast

    >>> tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
    >>> tokenizer("Hello world")["input_ids"]
    [15496, 995]

    >>> tokenizer(" Hello world")["input_ids"]
    [18435, 995]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
    the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            Path to the vocabulary file.
        merges_file (`str`, *optional*):
            Path to the merges file.
        tokenizer_file (`str`, *optional*):
            Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
            contains everything needed to load the tokenizer.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (CodeGen tokenizer detect beginning of words by the preceding space).
        return_token_type_ids (`bool`, *optional*, defaults to `False`):
            Whether to return token type IDs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        return_token_type_ids=False,
        **kwargs,
    ):
        self.return_token_type_ids = return_token_type_ids
        if self.return_token_type_ids:
            # Fix: rebind to an extended copy rather than `append`-ing in place.
            # The previous code mutated the shared *class-level*
            # `model_input_names` list, leaking "token_type_ids" into every
            # other instance of this class.
            self.model_input_names = self.model_input_names + ["token_type_ids"]

        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            return_token_type_ids=return_token_type_ids,
            **kwargs,
        )

        # The fast backend cannot prepend a BOS token; point users at the slow
        # tokenizer instead of failing silently. ("Currenty" typo fixed.)
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pretokenized input unless `add_prefix_space` is set, then defer to the base class."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        # NOTE(review): `assert` is stripped under `python -O`; kept as-is to
        # preserve the exception type (AssertionError) existing callers may rely on.
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pretokenized input unless `add_prefix_space` is set, then defer to the base class."""
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id] if self.sep_token_id is not None else []
        # Fix: gate the CLS token on `cls_token_id` itself. The previous code
        # checked `sep_token_id` here, so a `None` CLS id was counted whenever
        # SEP was configured but CLS was not.
        cls = [self.cls_token_id] if self.cls_token_id is not None else []
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backing *tokenizers* model (vocab + merges) into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
            truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
                A list of regular expression strings that will be used to truncate the returned string. This can be
                used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
                of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str`: The decoded sentence.
        """

        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        # CodeGen-specific post-processing: cut the completion at the earliest
        # occurrence of any of the given patterns (see `truncate`).
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Trim a generated completion.

        Limits `completion` to one top-level `print` statement and one function
        definition, then cuts at the earliest match of any regex in
        `truncate_before_pattern` (compiled with `re.MULTILINE`).
        """

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
263
+
264
+
265
+ __all__ = ["CodeGenTokenizerFast"]
janus/lib/python3.10/site-packages/transformers/models/cohere2/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Cohere and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # For static type checkers only: expose the real symbols eagerly.
    from .configuration_cohere2 import *
    from .modeling_cohere2 import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # (e.g. the PyTorch modeling code) are imported only on first attribute
    # access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/cohere2/__pycache__/modeling_cohere2.cpython-310.pyc ADDED
Binary file (31.2 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/cohere2/__pycache__/modular_cohere2.cpython-310.pyc ADDED
Binary file (20 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/cohere2/configuration_cohere2.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/cohere2/modular_cohere2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_cohere2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2024 Cohere Inc. HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ #
11
+ # Licensed under the Apache License, Version 2.0 (the "License");
12
+ # you may not use this file except in compliance with the License.
13
+ # You may obtain a copy of the License at
14
+ #
15
+ # http://www.apache.org/licenses/LICENSE-2.0
16
+ #
17
+ # Unless required by applicable law or agreed to in writing, software
18
+ # distributed under the License is distributed on an "AS IS" BASIS,
19
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20
+ # See the License for the specific language governing permissions and
21
+ # limitations under the License.
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...modeling_rope_utils import rope_config_validation
24
+
25
+
26
class Cohere2Config(PretrainedConfig):
    r"""
    Configuration class for a [`Cohere2Model`], defining the model architecture.

    Instantiating a configuration with the defaults yields a setup similar to
    [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01).
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the
    model outputs; read the documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the Cohere model.
        hidden_size (`int`, *optional*, defaults to 8192):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22528):
            Dimension of the MLP representations.
        logit_scale (`float`, *optional*, defaults to 0.0625):
            The scaling factor for the output logits.
        num_hidden_layers (`int`, *optional*, defaults to 40):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            Number of key/value heads for Grouped Query Attention. Equal to
            `num_attention_heads` means MHA, `1` means MQA, anything in between is GQA
            (see [this paper](https://arxiv.org/pdf/2305.13245.pdf)). Defaults to
            `num_attention_heads` when unspecified.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions. Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 5):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 255001):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Scaling configuration for the RoPE embeddings. Expected contents: `rope_type`
            (one of `'default'`, `'linear'`, `'dynamic'`, `'yarn'`, `'longrope'`,
            `'llama3'`) plus type-specific fields such as `factor`,
            `original_max_position_embeddings`, `attention_factor`, `beta_fast`,
            `beta_slow`, `short_factor`, `long_factor`, `low_freq_factor` and
            `high_freq_factor`. Validated by `rope_config_validation`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers
            during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        sliding_window (`int`, *optional*, defaults to 4096):
            Size of the sliding window attention context.
        sliding_window_pattern (`int`, *optional*, defaults to 4):
            Pattern for the sliding window attention.
        cache_implementation (`str`, *optional*, defaults to `"hybrid"`):
            The cache type to be used with `generate`.

    ```python
    >>> from transformers import Cohere2Model, Cohere2Config

    >>> # Initializing a Cohere Nextmodel configuration
    >>> configuration = Cohere2Config()

    >>> # Initializing a model from the Cohere2 configuration
    >>> model = Cohere2Model(configuration)  # doctest: +SKIP

    >>> # Accessing the model configuration
    >>> configuration = model.config  # doctest: +SKIP
    ```
    """

    model_type = "cohere2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=256000,
        hidden_size=8192,
        intermediate_size=22528,
        logit_scale=0.0625,
        num_hidden_layers=40,
        num_attention_heads=64,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=8192,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=5,
        eos_token_id=255001,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        sliding_window=4096,
        sliding_window_pattern=4,
        cache_implementation="hybrid",
        **kwargs,
    ):
        # Backward compatibility: an unspecified key/value head count means
        # plain multi-head attention.
        num_key_value_heads = num_attention_heads if num_key_value_heads is None else num_key_value_heads

        # Core architecture sizes.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.logit_scale = logit_scale
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        # Rotary embeddings and attention behavior.
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.sliding_window = sliding_window
        self.sliding_window_pattern = sliding_window_pattern
        # head_dim is stored explicitly so the attention forward functions can use it.
        self.head_dim = hidden_size // num_attention_heads
        self.cache_implementation = cache_implementation

        # Validate rotary-embedding parameters before handing off to the base class.
        rope_config_validation(self)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
207
+
208
+
209
+ __all__ = ["Cohere2Config"]
janus/lib/python3.10/site-packages/transformers/models/cohere2/modeling_cohere2.py ADDED
@@ -0,0 +1,948 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/cohere2/modular_cohere2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_cohere2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2024 Cohere Inc. HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ #
11
+ # Licensed under the Apache License, Version 2.0 (the "License");
12
+ # you may not use this file except in compliance with the License.
13
+ # You may obtain a copy of the License at
14
+ #
15
+ # http://www.apache.org/licenses/LICENSE-2.0
16
+ #
17
+ # Unless required by applicable law or agreed to in writing, software
18
+ # distributed under the License is distributed on an "AS IS" BASIS,
19
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20
+ # See the License for the specific language governing permissions and
21
+ # limitations under the License.
22
+ from typing import Callable, List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.nn as nn
26
+
27
+ from ...activations import ACT2FN
28
+ from ...cache_utils import Cache, HybridCache
29
+ from ...generation import GenerationMixin
30
+ from ...modeling_flash_attention_utils import FlashAttentionKwargs
31
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
32
+ from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
33
+ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
34
+ from ...processing_utils import Unpack
35
+ from ...utils import (
36
+ LossKwargs,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from .configuration_cohere2 import Cohere2Config
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+ _CONFIG_FOR_DOC = "Cohere2Config"
47
+
48
+
49
class Cohere2RotaryEmbedding(nn.Module):
    """Rotary position embedding (RoPE) generator shared by all decoder layers.

    Returns per-position `(cos, sin)` tensors. Note the Cohere variant
    interleaves the frequency pairs (`repeat_interleave`) instead of
    concatenating two halves the way e.g. Llama does.
    """

    def __init__(self, config: Cohere2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # Kept so dynamic-RoPE types can restore the original frequencies (see below).
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        # NOTE: `x` is only used for its device and dtype; the embedding values
        # are driven entirely by `position_ids`.
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)  # diff from Llama: we interleave() instead of cat()
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
105
+
106
+
107
class Cohere2LayerNorm(nn.Module):
    """Bias-free LayerNorm computed in float32 for numerical stability.

    `hidden_size` may be an int or a tuple; the tuple form is used for QKNorm
    to normalize across `head_dim`.
    """

    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        original_dtype = hidden_states.dtype
        states = hidden_states.to(torch.float32)
        # Standard layer norm over the last dimension: center, then scale by
        # the reciprocal standard deviation.
        centered = states - states.mean(-1, keepdim=True)
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered * torch.rsqrt(variance + self.variance_epsilon)
        scaled = self.weight.to(torch.float32) * normalized
        return scaled.to(original_dtype)
122
+
123
+
124
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    Equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep): expands
    (batch, num_key_value_heads, seqlen, head_dim) into
    (batch, num_attention_heads, seqlen, head_dim) so grouped-query KV heads
    line up with the query heads.
    """
    batch, n_kv_heads, seq_len, head_dim = hidden_states.shape
    if n_rep == 1:
        # Nothing to do — return the input untouched.
        return hidden_states
    expanded = hidden_states[:, :, None, :, :].expand(batch, n_kv_heads, n_rep, seq_len, head_dim)
    return expanded.reshape(batch, n_kv_heads * n_rep, seq_len, head_dim)
134
+
135
+
136
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    """Reference (non-fused) scaled dot-product attention with GQA support.

    Returns `(attn_output, attn_weights)`, with the output transposed back to
    (batch, seq, heads, head_dim) and made contiguous.
    """

    def _expand_kv(states: torch.Tensor, n_rep: int) -> torch.Tensor:
        # Broadcast each KV head `n_rep` times so KV heads match query heads.
        batch, n_kv, seq_len, head_dim = states.shape
        if n_rep == 1:
            return states
        widened = states[:, :, None, :, :].expand(batch, n_kv, n_rep, seq_len, head_dim)
        return widened.reshape(batch, n_kv * n_rep, seq_len, head_dim)

    key_states = _expand_kv(key, module.num_key_value_groups)
    value_states = _expand_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Additive mask, trimmed to the real key length (static caches pad keys).
        attn_weights = attn_weights + attention_mask[:, :, :, : key_states.shape[-2]]

    # Softmax in float32 for stability, then cast back to the query dtype.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states).transpose(1, 2).contiguous()

    return attn_output, attn_weights
160
+
161
+
162
def rotate_half(x):
    """Rotate interleaved lane pairs: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...).

    Operates on even/odd interleaved lanes, unlike the half-split version used
    by e.g. Llama.
    """
    even_lanes = x[..., ::2]
    odd_lanes = x[..., 1::2]
    return torch.stack((-odd_lanes, even_lanes), dim=-1).flatten(-2)
168
+
169
+
170
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos`/`sin` are unsqueezed so they broadcast
            against q/k: use 1 when q/k are (batch, heads, seq, head_dim), 2
            when they are (batch, seq, heads, head_dim).
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated
        using the Rotary Position Embedding.
    """

    def _rotate(t):
        # Interleaved rotation: (t0, t1, t2, t3, ...) -> (-t1, t0, -t3, t2, ...)
        return torch.stack((-t[..., 1::2], t[..., ::2]), dim=-1).flatten(-2)

    original_dtype = q.dtype
    # Work in float32, cast back at the end.
    q32, k32 = q.float(), k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q32 * cos) + (_rotate(q32) * sin)
    k_embed = (k32 * cos) + (_rotate(k32) * sin)
    return q_embed.to(dtype=original_dtype), k_embed.to(dtype=original_dtype)
198
+
199
+
200
class Cohere2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Cohere2Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        # Every `sliding_window_pattern`-th layer is a global-attention layer
        # (sliding_window=None); all other layers attend within a local window.
        self.sliding_window = (
            config.sliding_window if (self.layer_idx + 1) % self.config.sliding_window_pattern != 0 else None
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # Project to (batch, heads, seq, head_dim).
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        # RoPE is applied only on sliding-window (local) layers; global layers
        # skip positional encoding entirely in this architecture.
        if self.sliding_window is not None:
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "sliding_window": self.sliding_window,
                "cache_position": cache_position,
            }
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Select the attention backend; SDPA cannot return attention weights,
        # so fall back to eager when `output_attentions` is requested.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        # Merge heads back: (batch, heads, seq, head_dim) -> (batch, seq, hidden).
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
283
+
284
+
285
class Cohere2MLP(nn.Module):
    """Gated feed-forward block: down( act(gate(x)) * up(x) ), all bias-free."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        # Activated gate branch modulates the linear "up" branch, then project back.
        gated = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
        return self.down_proj(gated)
299
+
300
+
301
class Cohere2DecoderLayer(nn.Module):
    # One transformer block. Note the *parallel* residual layout: attention and
    # MLP both consume the same layer-normed input, and their outputs are added
    # to the residual in a single step (see `forward`).
    def __init__(self, config: Cohere2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Cohere2Attention(config, layer_idx)
        self.mlp = Cohere2MLP(config)
        self.input_layernorm = Cohere2LayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
        self.config = config
        # Layers that are NOT every `sliding_window_pattern`-th use local attention.
        self.is_sliding = (layer_idx + 1) % self.config.sliding_window_pattern != 0
        self.sliding_window = config.sliding_window

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
        """

        if self.is_sliding and attention_mask is not None:  # efficient SDPA and no padding
            # Flash attention uses a 2D mask; just keep the last `sliding_window` columns.
            if self.config._attn_implementation == "flash_attention_2":
                if past_key_value is not None:  # when decoding
                    attention_mask = attention_mask[:, -self.sliding_window :]
            else:
                # 4D additive mask: positions further back than `sliding_window`
                # are disabled by writing the dtype's minimum value into them.
                min_dtype = torch.finfo(hidden_states.dtype).min
                sliding_window_mask = torch.tril(
                    torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.sliding_window
                )
                attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask)
                if attention_mask.shape[-1] <= 1:  # when decoding
                    attention_mask = attention_mask[:, :, :, -self.sliding_window :]

        residual = hidden_states

        # Single shared layer norm feeds BOTH the attention and MLP branches.
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        # Fully Connected
        hidden_states_mlp = self.mlp(hidden_states)

        # Add everything together (parallel residual)
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
385
+
386
+
387
+ COHERE2_START_DOCSTRING = r"""
388
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
389
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
390
+ etc.)
391
+
392
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
393
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
394
+ and behavior.
395
+
396
+ Parameters:
397
+ config ([`Cohere2Config`]):
398
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
399
+ load the weights associated with the model, only the configuration. Check out the
400
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
401
+ """
402
+
403
+
404
@add_start_docstrings(
    "The bare Cohere2 Model outputting raw hidden-states without any specific head on top.",
    COHERE2_START_DOCSTRING,
)
class Cohere2PreTrainedModel(PreTrainedModel):
    # Capability flags consumed by the generic `PreTrainedModel` machinery
    # (attention backends, cache types, device placement, checkpointing).
    config_class = Cohere2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Cohere2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights from N(0, initializer_range); zero biases and padding rows."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
431
+
432
+
433
+ COHERE2_INPUTS_DOCSTRING = r"""
434
+ Args:
435
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
436
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
437
+ it.
438
+
439
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
440
+ [`PreTrainedTokenizer.__call__`] for details.
441
+
442
+ [What are input IDs?](../glossary#input-ids)
443
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
444
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
445
+
446
+ - 1 for tokens that are **not masked**,
447
+ - 0 for tokens that are **masked**.
448
+
449
+ [What are attention masks?](../glossary#attention-mask)
450
+
451
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
452
+ [`PreTrainedTokenizer.__call__`] for details.
453
+
454
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
455
+ `past_key_values`).
456
+
457
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
458
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
459
+ information on the default strategy.
460
+
461
+ - 1 indicates the head is **not masked**,
462
+ - 0 indicates the head is **masked**.
463
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
464
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
465
+ config.n_positions - 1]`.
466
+
467
+ [What are position IDs?](../glossary#position-ids)
468
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
469
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
470
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
471
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
472
+
473
+ Two formats are allowed:
474
+ - a [`~cache_utils.Cache`] instance, see our
475
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
476
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
477
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
478
+ cache format.
479
+
480
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
481
+ legacy cache format will be returned.
482
+
483
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
484
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
485
+ of shape `(batch_size, sequence_length)`.
486
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
487
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
488
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
489
+ model's internal embedding lookup matrix.
490
+ use_cache (`bool`, *optional*):
491
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
492
+ `past_key_values`).
493
+ output_attentions (`bool`, *optional*):
494
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
495
+ tensors for more detail.
496
+ output_hidden_states (`bool`, *optional*):
497
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
498
+ more detail.
499
+ return_dict (`bool`, *optional*):
500
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
501
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
502
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
503
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
504
+ the complete sequence length.
505
+ """
506
+
507
+
508
+ @add_start_docstrings(
509
+ "The bare Cohere2 Model outputting raw hidden-states without any specific head on top.",
510
+ COHERE2_START_DOCSTRING,
511
+ )
512
+ class Cohere2Model(Cohere2PreTrainedModel):
513
+ """
514
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Cohere2DecoderLayer`]
515
+ Args:
516
+ config: Cohere2Config
517
+ """
518
+
519
    def __init__(self, config: Cohere2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Cohere2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Cohere2LayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
        # A single rotary-embedding module is shared by every decoder layer.
        self.rotary_emb = Cohere2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()
534
+
535
    def get_input_embeddings(self):
        """Return the token embedding module (`nn.Embedding`)."""
        return self.embed_tokens
537
+
538
    def set_input_embeddings(self, value):
        """Replace the token embedding module."""
        self.embed_tokens = value
540
+
541
    @add_start_docstrings_to_model_forward(COHERE2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[HybridCache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        # Fall back to config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # Cohere2 requires a HybridCache (static + sliding layers); lazily
        # create one at inference time when none was supplied.
        if use_cache and past_key_values is None and not self.training:
            batch_size, seq_len, _ = inputs_embeds.shape
            past_key_values = HybridCache(
                self.config,
                batch_size=batch_size,
                max_cache_len=seq_len,
                device=self.device,
                dtype=inputs_embeds.dtype,
            )

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )
        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # Checkpointing path must pass arguments positionally.
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    position_embeddings,
                    causal_mask,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    position_embeddings=position_embeddings,
                    attention_mask=causal_mask,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        output = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        return output if return_dict else output.to_tuple()
650
+
651
    @torch.no_grad()
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: HybridCache,
        output_attentions: bool,
    ):
        # Flash Attention currently doesn't support static cache but Cohere2 work only with static cache.
        # So we will pass in attention mask as is in any case, not only when there's padding. Then we'll use its shape
        # to cut out keys/values trailing 0 used in static cache. This workaround should be compile compatible
        # as it doesn't cause dynamic control issues.
        if self.config._attn_implementation == "flash_attention_2":
            return attention_mask

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        # The mask must span the whole (possibly pre-allocated) cache length.
        if isinstance(past_key_values, HybridCache):
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = attention_mask.shape[-1] if attention_mask is not None else input_tensor.shape[1]

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )
        return causal_mask
685
+
686
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask: torch.Tensor,
    sequence_length: int,
    target_length: int,
    dtype: torch.dtype,
    device: torch.device,
    cache_position: torch.Tensor,
    batch_size: int,
    **kwargs,
):
    """
    Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
    `(batch_size, key_value_length)`. If the input `attention_mask` is already 4D, it is returned unchanged.

    Args:
        attention_mask (`torch.Tensor`):
            A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
            `(batch_size, 1, query_length, key_value_length)`.
        sequence_length (`int`):
            The sequence length being processed.
        target_length (`int`):
            The target length: when generating with static cache, the mask should be as long as the static cache,
            to account for the 0 padding, the part of the cache that is not filled yet.
        dtype (`torch.dtype`):
            The dtype to use for the 4D attention mask.
        device (`torch.device`):
            The device to place the 4D attention mask on.
        cache_position (`torch.Tensor`):
            Indices depicting the position of the input sequence tokens in the sequence.
        batch_size (`torch.Tensor`):
            Batch size.
    """
    if attention_mask is not None and attention_mask.dim() == 4:
        # Already 4D: assumed to be in inverted (additive) form — pass through.
        return attention_mask

    neg_inf = torch.finfo(dtype).min
    # Start from an all-masked (query, key) grid, then carve out what is visible.
    mask = torch.full((sequence_length, target_length), fill_value=neg_inf, dtype=dtype, device=device)
    if sequence_length != 1:
        mask = torch.triu(mask, diagonal=1)
    # A key slot stays masked only when it lies strictly after the query's cache position.
    mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
    mask = mask[None, None, :, :].expand(batch_size, 1, -1, -1)

    if attention_mask is not None:
        mask = mask.clone()  # copy to contiguous memory so the in-place fill below is safe
        valid_len = attention_mask.shape[-1]
        # A slot is padding iff it is causally visible (0) AND masked out (0) in the 2D mask.
        padding = mask[:, :, :, :valid_len] + attention_mask[:, None, None, :] == 0
        mask[:, :, :, :valid_len] = mask[:, :, :, :valid_len].masked_fill(padding, neg_inf)

    return mask
741
+
742
+
743
# Typed-kwargs contract for `Cohere2ForCausalLM.forward`: merges the
# flash-attention tuning options with the loss-related keyword arguments.
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
744
+
745
+
746
class Cohere2ForCausalLM(Cohere2PreTrainedModel, GenerationMixin):
    """Cohere2 decoder transformer with a language-modeling head on top."""

    # lm_head shares its weight with the input embeddings when
    # `config.tie_word_embeddings` is set (handled by `post_init`).
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}

    def __init__(self, config: Cohere2Config):
        super().__init__(config)
        self.model = Cohere2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Cohere models scale logits by a constant factor (main diff from Llama).
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        """Return the token embedding module of the underlying decoder."""
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        """Replace the token embedding module of the underlying decoder."""
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        """Return the LM head projection."""
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        """Replace the LM head projection."""
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        """Swap in a different decoder backbone."""
        self.model = decoder

    def get_decoder(self):
        """Return the decoder backbone."""
        return self.model

    @add_start_docstrings_to_model_forward(COHERE2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        num_logits_to_keep: int = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

            num_logits_to_keep (`int`, *optional*):
                Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
                `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
                token can save memory, which becomes pretty significant for long sequences or large vocabulary size.

        Returns:

        Example:

        ```python
        >> from transformers import AutoTokenizer, Cohere2ForCausalLM

        >> model = Cohere2ForCausalLM.from_pretrained("Cohere2ForAI/c4ai-command-r-v01")
        >> tokenizer = AutoTokenizer.from_pretrained("Cohere2ForAI/c4ai-command-r-v01")

        >> prompt = "Hey, are you conscious? Can you talk to me?"
        >> inputs = tokenizer(prompt, return_tensors="pt")

        >> # Generate
        >> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
        logits = logits * self.logit_scale  # main diff from Llama

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        if not return_dict:
            # Tuple output: (loss?, logits, *decoder_extras)
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        num_logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten: has a special cache type, `HybridCache`

        # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
        # Exception 1: when passing input_embeds, input_ids may be missing entries
        # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
        if past_key_values is not None:
            if inputs_embeds is not None:  # Exception 1
                input_ids = input_ids[:, -cache_position.shape[0] :]
            elif input_ids.shape[1] != cache_position.shape[0]:  # Default case (the "else", a no op, is Exception 2)
                input_ids = input_ids[:, cache_position]
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]
                # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s
                # `mode="reduce-overhead`, as otherwise the input `position_ids` would have various stride
                # during the decoding. Here, simply using `.contiguous()` is not sufficient as in the
                # batch size = 1 case, `position_ids` is already contiguous but with varying stride
                # which retriggers a capture.
                position_ids = position_ids.clone(memory_format=torch.contiguous_format)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and cache_position[0] == 0:
            model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
        else:
            # The clone here is for the same reason as for `position_ids`.
            model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}

        # HybridCache + non-flash attention needs the dense 4D mask sized to the
        # full static cache length, built from the 2D padding mask.
        if (
            isinstance(past_key_values, HybridCache)
            and attention_mask.ndim == 2
            and not self.config._attn_implementation == "flash_attention_2"
        ):
            if model_inputs["inputs_embeds"] is not None:
                batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
                device = model_inputs["inputs_embeds"].device
            else:
                batch_size, sequence_length = model_inputs["input_ids"].shape
                device = model_inputs["input_ids"].device

            attention_mask = self.model._prepare_4d_causal_attention_mask_with_cache_position(
                attention_mask,
                sequence_length=sequence_length,
                target_length=past_key_values.get_max_cache_shape(),
                dtype=self.lm_head.weight.dtype,
                device=device,
                cache_position=cache_position,
                batch_size=batch_size,
            )

        if num_logits_to_keep is not None:
            model_inputs["num_logits_to_keep"] = num_logits_to_keep

        model_inputs.update(
            {
                "position_ids": position_ids,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
            }
        )
        return model_inputs
946
+
947
+
948
+ __all__ = ["Cohere2ForCausalLM", "Cohere2Model", "Cohere2PreTrainedModel"]
janus/lib/python3.10/site-packages/transformers/models/cohere2/modular_cohere2.py ADDED
@@ -0,0 +1,618 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 Cohere Inc. HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from typing import Callable, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.utils.checkpoint
21
+
22
+ from ...cache_utils import Cache, HybridCache
23
+ from ...configuration_utils import PretrainedConfig
24
+ from ...modeling_flash_attention_utils import FlashAttentionKwargs
25
+ from ...modeling_outputs import (
26
+ BaseModelOutputWithPast,
27
+ )
28
+ from ...modeling_rope_utils import rope_config_validation
29
+ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
30
+ from ...processing_utils import Unpack
31
+ from ...utils import (
32
+ logging,
33
+ )
34
+ from ..cohere.modeling_cohere import (
35
+ CohereAttention,
36
+ CohereDecoderLayer,
37
+ CohereForCausalLM,
38
+ CohereLayerNorm,
39
+ CoherePreTrainedModel,
40
+ CohereRotaryEmbedding,
41
+ apply_rotary_pos_emb,
42
+ eager_attention_forward,
43
+ )
44
+ from ..gemma2.modeling_gemma2 import Gemma2Model
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
class Cohere2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate an Cohere
    model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model.


    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`CohereModel`]
        hidden_size (`int`, *optional*, defaults to 8192):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22528):
            Dimension of the MLP representations.
        logit_scale (`float`, *optional*, defaults to 0.0625):
            The scaling factor for the output logits.
        num_hidden_layers (`int`, *optional*, defaults to 40):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 5):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 255001):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
            and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
            accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        sliding_window (`int`, *optional*, defaults to 4096):
            Size of the sliding window attention context.
        sliding_window_pattern (`int`, *optional*, defaults to 4):
            Pattern for the sliding window attention.
        cache_implementation (`str`, *optional*, defaults to `"hybrid"`): the cache type to be used with `generate`.

    ```python
    >>> from transformers import Cohere2Model, Cohere2Config

    >>> # Initializing a Cohere Nextmodel configuration
    >>> configuration = Cohere2Config()

    >>> # Initializing a model from the Cohere2 configuration
    >>> model = Cohere2Model(configuration)  # doctest: +SKIP

    >>> # Accessing the model configuration
    >>> configuration = model.config  # doctest: +SKIP
    ```
    """

    model_type = "cohere2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=256000,
        hidden_size=8192,
        intermediate_size=22528,
        logit_scale=0.0625,
        num_hidden_layers=40,
        num_attention_heads=64,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=8192,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=5,
        eos_token_id=255001,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        sliding_window=4096,
        sliding_window_pattern=4,
        cache_implementation="hybrid",
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.logit_scale = logit_scale
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.sliding_window = sliding_window
        self.sliding_window_pattern = sliding_window_pattern
        # Need to specify head_dim in the config so it can be used in the attention forward functions
        self.head_dim = hidden_size // num_attention_heads
        self.cache_implementation = cache_implementation

        # Validate the correctness of rotary position embeddings parameters
        rope_config_validation(self)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
231
+
232
+
233
# Identical to the Cohere rotary embedding; subclassed so the modular converter
# emits a Cohere2-named copy into modeling_cohere2.py.
class Cohere2RotaryEmbedding(CohereRotaryEmbedding):
    pass
235
+
236
+
237
# Identical to the Cohere layer norm; subclassed so the modular converter
# emits a Cohere2-named copy into modeling_cohere2.py.
class Cohere2LayerNorm(CohereLayerNorm):
    pass
239
+
240
+
241
class Cohere2Attention(CohereAttention, nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Cohere2Config, layer_idx: Optional[int] = None):
        # CohereAttention.__init__ is deliberately bypassed: Cohere2 builds its
        # own projections below. BUG FIX: the unbound call must be given `self`;
        # `nn.Module.__init__()` raises `TypeError: Module.__init__() missing 1
        # required positional argument: 'self'` at construction time.
        nn.Module.__init__(self)
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        # Every `sliding_window_pattern`-th layer attends globally (window=None);
        # all other layers use local sliding-window attention.
        self.sliding_window = (
            config.sliding_window if (self.layer_idx + 1) % self.config.sliding_window_pattern != 0 else None
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Compute self-attention for one layer.

        Args:
            hidden_states: `(batch, seq_len, hidden_size)` input activations.
            position_embeddings: `(cos, sin)` rotary tables from the shared
                rotary embedding module.
            attention_mask: additive 4D mask, or 2D padding mask for flash-attn.
            past_key_value: KV cache updated in place when provided.
            cache_position: indices of the current tokens within the cache.

        Returns:
            Tuple of the attention output `(batch, seq_len, hidden_size)` and
            the attention weights (or `None` for backends that don't expose them).
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        # RoPE is applied only on sliding-window (local) layers; Cohere2's
        # global layers use no positional rotation.
        if self.sliding_window is not None:
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "sliding_window": self.sliding_window,
                "cache_position": cache_position,
            }
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
324
+
325
+
326
class Cohere2DecoderLayer(CohereDecoderLayer):
    """Cohere2 decoder block: parallel attention + MLP branches over one layernorm."""

    def __init__(self, config: Cohere2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.self_attn = Cohere2Attention(config, layer_idx)
        self.config = config
        # Every `sliding_window_pattern`-th layer is global; the rest are local.
        self.is_sliding = (layer_idx + 1) % self.config.sliding_window_pattern != 0
        self.sliding_window = config.sliding_window

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
        """

        # Restrict the mask to the local window on sliding-window layers.
        if self.is_sliding and attention_mask is not None:  # efficient SDPA and no padding
            # Flash-attn is a 2D tensor
            if self.config._attn_implementation == "flash_attention_2":
                if past_key_value is not None:  # when decoding
                    attention_mask = attention_mask[:, -self.sliding_window :]
            else:
                # Mask out keys further than `sliding_window` behind each query.
                min_dtype = torch.finfo(hidden_states.dtype).min
                sliding_window_mask = torch.tril(
                    torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.sliding_window
                )
                attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask)
                if attention_mask.shape[-1] <= 1:  # when decoding
                    attention_mask = attention_mask[:, :, :, -self.sliding_window :]

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        # Fully Connected — note: fed the SAME normed input as attention
        # (parallel residual, the Cohere block layout), not attention's output.
        hidden_states_mlp = self.mlp(hidden_states)

        # Add everything together
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
407
+
408
+
409
# Same weight-init / loading behavior as CoherePreTrainedModel, re-bound to the
# Cohere2 configuration class.
class Cohere2PreTrainedModel(CoherePreTrainedModel):
    config_class = Cohere2Config
411
+
412
+
413
class Cohere2Model(Gemma2Model):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Cohere2DecoderLayer`]

    Args:
        config: Cohere2Config
    """

    def __init__(self, config: Cohere2Config):
        super().__init__(config)
        # Cohere2 overrides Gemma2's final norm and rotary embedding with its own variants.
        self.norm = Cohere2LayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
        self.rotary_emb = Cohere2RotaryEmbedding(config=config)

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[HybridCache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """Run the embedding + decoder stack and return the final hidden states.

        Exactly one of `input_ids` / `inputs_embeds` must be provided; optional outputs
        (per-layer hidden states, attentions) are collected only when requested.
        """
        # Fall back to config defaults for any flag the caller left unset.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        # Caching and gradient checkpointing are mutually exclusive.
        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # Inference without a user-supplied cache: allocate the special HybridCache
        # (this model mixes sliding-window and global-attention layers).
        if use_cache and past_key_values is None and not self.training:
            batch_size, seq_len, _ = inputs_embeds.shape
            past_key_values = HybridCache(
                self.config,
                batch_size=batch_size,
                max_cache_len=seq_len,
                device=self.device,
                dtype=inputs_embeds.dtype,
            )

        if cache_position is None:
            # Positions continue from however many tokens the cache has already seen.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )
        hidden_states = inputs_embeds

        # Rotary position embeddings are computed once and shared by all decoder layers.
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # Re-runs the layer forward in backward to trade compute for memory.
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    position_embeddings,
                    causal_mask,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    position_embeddings=position_embeddings,
                    attention_mask=causal_mask,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # Include the post-norm output as the last entry of the hidden-state tuple.
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        output = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        return output if return_dict else output.to_tuple()
533
+
534
+
535
class Cohere2ForCausalLM(CohereForCausalLM):
    """Causal-LM head on top of [`Cohere2Model`]; inherits everything from Cohere except generation-input prep."""

    def __init__(self, config: Cohere2Config):
        super().__init__(config)

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        num_logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten: has a special cache type, `HybridCache`

        # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
        # Exception 1: when passing input_embeds, input_ids may be missing entries
        # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
        if past_key_values is not None:
            if inputs_embeds is not None:  # Exception 1
                input_ids = input_ids[:, -cache_position.shape[0] :]
            elif input_ids.shape[1] != cache_position.shape[0]:  # Default case (the "else", a no op, is Exception 2)
                input_ids = input_ids[:, cache_position]
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]
                # This `clone` call is needed to avoid recapturing cuda graphs with `torch.compile`'s
                # `mode="reduce-overhead`, as otherwise the input `position_ids` would have various stride
                # during the decoding. Here, simply using `.contiguous()` is not sufficient as in the
                # batch size = 1 case, `position_ids` is already contiguous but with varying stride
                # which retriggers a capture.
                position_ids = position_ids.clone(memory_format=torch.contiguous_format)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and cache_position[0] == 0:
            model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
        else:
            # The clone here is for the same reason as for `position_ids`.
            model_inputs = {"input_ids": input_ids.clone(memory_format=torch.contiguous_format), "inputs_embeds": None}

        # Static HybridCache + a 2D padding mask (and non-flash attention) requires
        # expanding the mask to a full 4D causal mask sized to the cache.
        if (
            isinstance(past_key_values, HybridCache)
            and attention_mask.ndim == 2
            and not self.config._attn_implementation == "flash_attention_2"
        ):
            if model_inputs["inputs_embeds"] is not None:
                batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
                device = model_inputs["inputs_embeds"].device
            else:
                batch_size, sequence_length = model_inputs["input_ids"].shape
                device = model_inputs["input_ids"].device

            attention_mask = self.model._prepare_4d_causal_attention_mask_with_cache_position(
                attention_mask,
                sequence_length=sequence_length,
                target_length=past_key_values.get_max_cache_shape(),
                dtype=self.lm_head.weight.dtype,
                device=device,
                cache_position=cache_position,
                batch_size=batch_size,
            )

        if num_logits_to_keep is not None:
            model_inputs["num_logits_to_keep"] = num_logits_to_keep

        model_inputs.update(
            {
                "position_ids": position_ids,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
            }
        )
        return model_inputs
616
+
617
+
618
# Public API re-exported by the auto-generated modeling module.
__all__ = ["Cohere2Config", "Cohere2ForCausalLM", "Cohere2Model", "Cohere2PreTrainedModel"]
janus/lib/python3.10/site-packages/transformers/models/cpm/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Static type checkers resolve the real symbols eagerly.
    from .tokenization_cpm import *
    from .tokenization_cpm_fast import *
else:
    import sys

    # At runtime, swap this module for a lazy proxy so that heavy tokenizer
    # dependencies are only imported when a symbol is first accessed.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (536 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc ADDED
Binary file (9.33 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes."""
16
+
17
+ import os
18
+ import unicodedata
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...utils import SPIECE_UNDERLINE, logging
26
+
27
+
28
# Module-level logger, named after this module.
logger = logging.get_logger(__name__)

# Canonical key -> on-disk filename for the SentencePiece vocabulary.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
31
+
32
+
33
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """
        Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
        [SentencePiece](https://github.com/google/sentencepiece).

        This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.

        Args:
            vocab_file (`str`):
                [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
                contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `False`):
                Whether to lowercase the input when tokenizing.
            remove_space (`bool`, *optional*, defaults to `True`):
                Whether to strip the text when tokenizing (removing excess spaces before and after the string).
            keep_accents (`bool`, *optional*, defaults to `False`):
                Whether to keep accents when tokenizing.
            bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
                token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the `cls_token`.

                </Tip>

            eos_token (`str`, *optional*, defaults to `"</s>"`):
                The end of sequence token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the `sep_token`.

                </Tip>

            unk_token (`str`, *optional*, defaults to `"<unk>"`):
                The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
                this token instead.
            sep_token (`str`, *optional*, defaults to `"<sep>"`):
                The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
                for sequence classification or for a text and a question for question answering. It is also used as the
                last token of a sequence built with special tokens.
            pad_token (`str`, *optional*, defaults to `"<pad>"`):
                The token used for padding, for example when batching sequences of different lengths.
            cls_token (`str`, *optional*, defaults to `"<cls>"`):
                The classifier token which is used when doing sequence classification (classification of the whole
                sequence instead of per-token classification). It is the first token of the sequence when built with
                special tokens.
            mask_token (`str`, *optional*, defaults to `"<mask>"`):
                The token used for masking values. This is the token used when training this model with masked language
                modeling. This is the token which the model will try to predict.
            additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
                Additional special tokens used by the tokenizer.

        Attributes:
            sp_model (`SentencePieceProcessor`):
                The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
        """
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # jieba is an optional dependency; fail early with an actionable message.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        # Maps space/newline to the placeholder glyphs \u2582/\u2583 (undone in `_decode`).
        # NOTE(review): `self.jieba`/`self.translator` are set up here but this class's
        # `_tokenize` does not apply them — confirm against the fast tokenizer's behavior.
        self.translator = str.maketrans(" \n", "\u2582\u2583")

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_vocab
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__getstate__
    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and rebuild in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__setstate__
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.preprocess_text
    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before SentencePiece."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            # NFKD then drop combining marks, i.e. strip accents.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._tokenize
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Split pieces like "2022," so the digits and the trailing comma become separate tokens.
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Avoid introducing a spurious leading word-boundary marker.
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # Mask mirrors build_inputs_with_special_tokens: <sep> (and final <sep> <cls>) are special.
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 2
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]  # XLNet convention: the trailing <cls> gets its own segment id

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model into `save_directory` (copying the original file when available)."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk: serialize the in-memory SentencePiece model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        # Undo the jieba-era placeholders: drop inter-token spaces and restore real space/newline.
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
346
+
347
+
348
# Public API of this module.
__all__ = ["CpmTokenizer"]
janus/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes."""
16
+
17
+ import os
18
+ from shutil import copyfile
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils_fast import AddedToken, PreTrainedTokenizerFast
22
+ from ...utils import logging
23
+
24
+
25
# Module-level logger, named after this module.
logger = logging.get_logger(__name__)

# Canonical keys -> on-disk filenames for the slow (SentencePiece) and fast (tokenizers) vocab files.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
28
+
29
+
30
class CpmTokenizerFast(PreTrainedTokenizerFast):
    """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        """
        Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
        [SentencePiece](https://github.com/google/sentencepiece).

        This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.

        Args:
            vocab_file (`str`):
                [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
                contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `False`):
                Whether to lowercase the input when tokenizing.
            remove_space (`bool`, *optional*, defaults to `True`):
                Whether to strip the text when tokenizing (removing excess spaces before and after the string).
            keep_accents (`bool`, *optional*, defaults to `False`):
                Whether to keep accents when tokenizing.
            bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
                token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the `cls_token`.

                </Tip>

            eos_token (`str`, *optional*, defaults to `"</s>"`):
                The end of sequence token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the `sep_token`.

                </Tip>

            unk_token (`str`, *optional*, defaults to `"<unk>"`):
                The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
                this token instead.
            sep_token (`str`, *optional*, defaults to `"<sep>"`):
                The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
                for sequence classification or for a text and a question for question answering. It is also used as the
                last token of a sequence built with special tokens.
            pad_token (`str`, *optional*, defaults to `"<pad>"`):
                The token used for padding, for example when batching sequences of different lengths.
            cls_token (`str`, *optional*, defaults to `"<cls>"`):
                The classifier token which is used when doing sequence classification (classification of the whole
                sequence instead of per-token classification). It is the first token of the sequence when built with
                special tokens.
            mask_token (`str`, *optional*, defaults to `"<mask>"`):
                The token used for masking values. This is the token used when training this model with masked language
                modeling. This is the token which the model will try to predict.
            additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
                Additional special tokens used by the tokenizer.

        Attributes:
            sp_model (`SentencePieceProcessor`):
                The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
        """
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        # jieba is an optional dependency; fail early with an actionable message.
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        # Maps space/newline to the placeholder glyphs \u2582/\u2583 (undone in `_decode`).
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    def can_save_slow_tokenizer(self) -> bool:
        # Saving a slow tokenizer requires the original SentencePiece model file on disk.
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 2
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]  # XLNet convention: the trailing <cls> gets its own segment id

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the original SentencePiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)

    def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
        # Jieba-segment each text, replacing space/newline inside segments with placeholder
        # glyphs, then join segments with spaces for the underlying tokenizer.
        # NOTE(review): this iterates assuming plain strings — text *pairs* (tuples) would
        # reach `jieba.cut` unchanged; confirm pair inputs are unsupported for CPM.
        batch_text_or_text_pairs = [
            " ".join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)])
            for text in batch_text_or_text_pairs
        ]
        return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)

    def _decode(self, *args, **kwargs):
        # Undo the encoding-side placeholders: drop inter-segment spaces and restore
        # real space/newline characters.
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
239
+
240
+
241
# Public API of this module.
__all__ = ["CpmTokenizerFast"]
janus/lib/python3.10/site-packages/transformers/models/deit/__pycache__/image_processing_deit.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_tf_deit.cpython-310.pyc ADDED
Binary file (37.8 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/deit/image_processing_deit.py ADDED
@@ -0,0 +1,299 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for DeiT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ IMAGENET_STANDARD_MEAN,
25
+ IMAGENET_STANDARD_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ infer_channel_dimension_format,
30
+ is_scaled_image,
31
+ make_list_of_images,
32
+ to_numpy_array,
33
+ valid_images,
34
+ validate_preprocess_arguments,
35
+ )
36
+ from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
37
+
38
+
39
+ if is_vision_available():
40
+ import PIL
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+
46
class DeiTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a DeiT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in `preprocess`.
        size (`Dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`):
            Size of the image after `resize`. Can be overridden by `size` in `preprocess`.
        resample (`PILImageResampling` filter, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
            is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        # Fix: use the backend-agnostic `PILImageResampling.BICUBIC` (same value, 3) instead of
        # `PIL.Image.BICUBIC`. The old default was evaluated at class-definition time, which
        # required Pillow even though the import above is guarded by `is_vision_available()`,
        # and `Image.BICUBIC` is deprecated since Pillow 9.1 (removed in Pillow 10).
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> BatchFeature:  # Fix: the method returns a BatchFeature, not a PIL image.
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after `resize`.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to
                `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be
                padded with zeros and then cropped
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - `None`: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        # Per-call arguments fall back to the instance-level defaults set in `__init__`.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        # Transformation order matches the DeiT pipeline: resize -> crop -> rescale -> normalize.
        all_images = []
        for image in images:
            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

            if do_center_crop:
                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )

            all_images.append(image)
        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            for image in all_images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
297
+
298
+
299
# Public API of this module.
__all__ = ["DeiTImageProcessor"]
janus/lib/python3.10/site-packages/transformers/models/deit/modeling_deit.py ADDED
@@ -0,0 +1,1021 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 Facebook AI Research (FAIR), Ross Wightman, The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch DeiT model."""
16
+
17
+ import collections.abc
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Set, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ BaseModelOutputWithPooling,
31
+ ImageClassifierOutput,
32
+ MaskedImageModelingOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
36
+ from ...utils import (
37
+ ModelOutput,
38
+ add_code_sample_docstrings,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ torch_int,
44
+ )
45
+ from .configuration_deit import DeiTConfig
46
+
47
+
48
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "DeiTConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/deit-base-distilled-patch16-224"
# Sequence length 198 = patch tokens + the 2 special tokens ([CLS] and distillation,
# see the `num_patches + 2` position embeddings in DeiTEmbeddings).
_EXPECTED_OUTPUT_SHAPE = [1, 198, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/deit-base-distilled-patch16-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
60
+
61
+
62
class DeiTEmbeddings(nn.Module):
    """
    Construct the CLS token, distillation token, position and patch embeddings. Optionally, also the mask token.
    """

    def __init__(self, config: DeiTConfig, use_mask_token: bool = False) -> None:
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        # mask_token is only created on demand; passing `bool_masked_pos` to `forward`
        # without `use_mask_token=True` would fail on the None below.
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
        self.patch_embeddings = DeiTPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        # +2 positions: one for the CLS token and one for the distillation token.
        self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.patch_size

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing and 2 class embeddings.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        # The first two positions are the CLS/distillation tokens; they are excluded
        # from the 2D interpolation and re-attached afterwards.
        num_patches = embeddings.shape[1] - 2
        num_positions = self.position_embeddings.shape[1] - 2

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_and_dist_pos_embed = self.position_embeddings[:, :2]
        patch_pos_embed = self.position_embeddings[:, 2:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        # Reshape the flat patch positions back onto their (sqrt x sqrt) 2D grid
        # (assumes the pre-trained grid was square), interpolate, then flatten again.
        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_and_dist_pos_embed, patch_pos_embed), dim=1)

    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: bool = False,
    ) -> torch.Tensor:
        """Embed `pixel_values`, optionally masking patches and interpolating position encodings."""
        _, _, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values)

        batch_size, seq_length, _ = embeddings.size()

        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)

        distillation_tokens = self.distillation_token.expand(batch_size, -1, -1)

        # Token layout: [CLS, distillation, patch_0, ..., patch_{N-1}] — matches the
        # position-embedding layout sliced in `interpolate_pos_encoding`.
        embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1)
        position_embedding = self.position_embeddings

        if interpolate_pos_encoding:
            position_embedding = self.interpolate_pos_encoding(embeddings, height, width)

        embeddings = embeddings + position_embedding
        embeddings = self.dropout(embeddings)
        return embeddings
149
+
150
+
151
class DeiTPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()

        def as_pair(value):
            # Accept either a single int or an (h, w) iterable for size parameters.
            return value if isinstance(value, collections.abc.Iterable) else (value, value)

        image_size = as_pair(config.image_size)
        patch_size = as_pair(config.patch_size)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = config.num_channels
        self.num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        # Non-overlapping patch extraction: a conv whose kernel and stride both equal the patch size.
        self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """Project `pixel_values` into a sequence of patch embeddings."""
        if pixel_values.shape[1] != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # (B, C, H, W) -> (B, hidden, H/p, W/p) -> (B, hidden, num_patches) -> (B, num_patches, hidden)
        embeddings = self.projection(pixel_values)
        return embeddings.flatten(2).transpose(1, 2)
181
+
182
+
183
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->DeiT
class DeiTSelfAttention(nn.Module):
    """Standard multi-head self-attention (eager implementation)."""

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                # NOTE(review): the trailing comma formats hidden_size as a 1-tuple in the
                # message — looks unintended, but kept as-is ("Copied from" upstream ViT).
                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        """Return `(context,)` or `(context, attention_probs)` when `output_attentions` is set."""
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
242
+
243
+
244
# Copied from transformers.models.vit.modeling_vit.ViTSdpaSelfAttention with ViT->DeiT
class DeiTSdpaSelfAttention(DeiTSelfAttention):
    """Self-attention using `torch.nn.functional.scaled_dot_product_attention`; falls back
    to the eager parent implementation when attention probs or a head mask are requested."""

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        # Kept as a plain float because SDPA takes the dropout probability directly
        # instead of going through the parent's nn.Dropout module.
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # SDPA cannot return attention probabilities; fall back to the eager path.
        if output_attentions or head_mask is not None:
            logger.warning_once(
                "`DeiTSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support "
                "`output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but "
                "specifying the manual implementation will be required from Transformers version v5.0.0 onwards. "
                'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                head_mask=head_mask,
                output_attentions=output_attentions,
            )

        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Dropout is only applied during training, matching the eager path's nn.Dropout.
        context_layer = torch.nn.functional.scaled_dot_product_attention(
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            self.attention_probs_dropout_prob if self.training else 0.0,
            is_causal=False,
            scale=None,
        )

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        # Second element is None (no attention probs) to keep the tuple shape of the parent.
        return context_layer, None
290
+
291
+
292
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->DeiT
class DeiTSelfOutput(nn.Module):
    """
    The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is intentionally unused here — the residual add happens in DeiTLayer.
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states
309
+
310
+
311
# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->DeiT
class DeiTAttention(nn.Module):
    """Self-attention plus output projection, with support for pruning attention heads."""

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.attention = DeiTSelfAttention(config)
        self.output = DeiTSelfOutput(config)
        # Heads removed by `prune_heads`; tracked so already-pruned heads are skipped.
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        """Remove `heads` from the query/key/value/output projections of this layer."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        # Output projection is pruned along its input dimension (dim=1).
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        """Run self-attention then the output projection; passes attention probs through."""
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
349
+
350
+
351
# Copied from transformers.models.vit.modeling_vit.ViTSdpaAttention with ViT->DeiT
class DeiTSdpaAttention(DeiTAttention):
    # Identical to DeiTAttention except the self-attention submodule is swapped for
    # the SDPA variant; selected via DEIT_ATTENTION_CLASSES["sdpa"].
    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        self.attention = DeiTSdpaSelfAttention(config)
356
+
357
+
358
# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->DeiT
class DeiTIntermediate(nn.Module):
    """First half of the MLP block: hidden_size -> intermediate_size, then activation."""

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be given as a string key into ACT2FN or as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states
373
+
374
+
375
# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->DeiT
class DeiTOutput(nn.Module):
    """Second half of the MLP block: intermediate_size -> hidden_size, dropout, residual add."""

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        # Residual connection: `input_tensor` is the layer input (see DeiTLayer.forward).
        hidden_states = hidden_states + input_tensor

        return hidden_states
389
+
390
+
391
# Maps `config._attn_implementation` to the attention module instantiated in DeiTLayer.
DEIT_ATTENTION_CLASSES = {
    "eager": DeiTAttention,
    "sdpa": DeiTSdpaAttention,
}
395
+
396
+
397
# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->DeiT,VIT->DEIT
class DeiTLayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = DEIT_ATTENTION_CLASSES[config._attn_implementation](config)
        self.intermediate = DeiTIntermediate(config)
        self.output = DeiTOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # Pre-norm transformer block: norm -> attention -> residual, then
        # norm -> MLP -> residual. The statement order below is behavior-critical.
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in DeiT, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in DeiT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here (inside DeiTOutput.forward)
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs
438
+
439
+
440
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->DeiT
class DeiTEncoder(nn.Module):
    """Stack of `config.num_hidden_layers` DeiTLayer blocks."""

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([DeiTLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled by PreTrainedModel.gradient_checkpointing_enable(); see forward().
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            # Record the layer *input* so the embeddings output is the first entry.
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                # Trade compute for memory during training by re-running the layer
                # in the backward pass instead of storing activations.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        # Append the final hidden state so the tuple has num_hidden_layers + 1 entries.
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
490
+
491
+
492
class DeiTPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DeiTConfig
    base_model_prefix = "deit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    # Layers that must not be split across devices by `accelerate` model sharding.
    _no_split_modules = ["DeiTLayer"]
    _supports_sdpa = True

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
            # `trunc_normal_cpu` not implemented in `half` issues
            module.weight.data = nn.init.trunc_normal_(
                module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
            ).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
518
+
519
+
520
# Injected into class docstrings by the `add_start_docstrings` decorators below;
# consumed at runtime, so the string content is kept verbatim.
DEIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`DeiTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
530
+
531
# Injected into each model's `forward` docstring by `add_start_docstrings_to_model_forward`;
# consumed at runtime, so the string content is kept verbatim.
DEIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`DeiTImageProcessor.__call__`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.
"""
554
+
555
+
556
@add_start_docstrings(
    "The bare DeiT Model transformer outputting raw hidden-states without any specific head on top.",
    DEIT_START_DOCSTRING,
)
class DeiTModel(DeiTPreTrainedModel):
    def __init__(self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False) -> None:
        super().__init__(config)
        self.config = config

        # `use_mask_token` is enabled by DeiTForMaskedImageModeling so masked patches
        # can be replaced by a learned mask embedding.
        self.embeddings = DeiTEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = DeiTEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = DeiTPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> DeiTPatchEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        # Fall back to config defaults for any unspecified output option.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # TODO: maybe have a cleaner way to cast the input (from `ImageProcessor` side?)
        expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
        if pixel_values.dtype != expected_dtype:
            pixel_values = pixel_values.to(expected_dtype)

        embedding_output = self.embeddings(
            pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
        )

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # Final layernorm is applied outside the encoder, after the last block.
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
653
+
654
+
655
# Copied from transformers.models.vit.modeling_vit.ViTPooler with ViT->DeiT
class DeiTPooler(nn.Module):
    """Pools the sequence by projecting the first token's hidden state through a tanh-activated linear layer."""

    def __init__(self, config: DeiTConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
669
+
670
+
671
@add_start_docstrings(
    """DeiT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """,
    DEIT_START_DOCSTRING,
)
class DeiTForMaskedImageModeling(DeiTPreTrainedModel):
    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)

        # use_mask_token=True so masked patches are replaced with a learned embedding.
        self.deit = DeiTModel(config, add_pooling_layer=False, use_mask_token=True)

        # 1x1 conv expands each token to encoder_stride^2 pixels per channel;
        # PixelShuffle rearranges them back to image resolution.
        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=config.hidden_size,
                out_channels=config.encoder_stride**2 * config.num_channels,
                kernel_size=1,
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Union[tuple, MaskedImageModelingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, DeiTForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
        >>> model = DeiTForMaskedImageModeling.from_pretrained("facebook/deit-base-distilled-patch16-224")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 224, 224]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deit(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        sequence_output = outputs[0]

        # Reshape to (batch_size, num_channels, height, width)
        # NOTE(review): this drops the first and the last token. Elsewhere in this file
        # (DeiTForImageClassificationWithTeacher) the CLS token is at index 0 and the
        # distillation token at index 1 — confirm the intended slice against the
        # embedding layout before relying on this.
        sequence_output = sequence_output[:, 1:-1]
        batch_size, sequence_length, num_channels = sequence_output.shape
        height = width = int(sequence_length**0.5)
        sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            # Expand the per-patch mask to pixel resolution, then average the L1
            # reconstruction error over masked pixels only (SimMIM-style loss).
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            # 1e-5 guards against division by zero when the mask is empty.
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[1:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return MaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
788
+
789
+
790
@add_start_docstrings(
    """
    DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    """,
    DEIT_START_DOCSTRING,
)
class DeiTForImageClassification(DeiTPreTrainedModel):
    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.deit = DeiTModel(config, add_pooling_layer=False)

        # Classifier head (Identity when num_labels == 0, i.e. feature extraction only)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, DeiTForImageClassification
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> torch.manual_seed(3)  # doctest: +IGNORE_RESULT
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> # note: we are loading a DeiTForImageClassificationWithTeacher from the hub here,
        >>> # so the head will be randomly initialized, hence the predictions will be random
        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
        >>> model = DeiTForImageClassification.from_pretrained("facebook/deit-base-distilled-patch16-224")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: Polaroid camera, Polaroid Land camera
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deit(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        sequence_output = outputs[0]

        # Classify from the [CLS] token (position 0).
        logits = self.classifier(sequence_output[:, 0, :])
        # we don't use the distillation token

        loss = None
        if labels is not None:
            # Labels may live on a different device under model parallelism.
            labels = labels.to(logits.device)
            # Infer the problem type once from label dtype/num_labels, then cache it
            # on the config so later calls take the same branch.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
904
+
905
+
906
@dataclass
class DeiTForImageClassificationWithTeacherOutput(ModelOutput):
    """
    Output type of [`DeiTForImageClassificationWithTeacher`].

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores as the average of the cls_logits and distillation logits.
        cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
            class token).
        distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
            distillation token).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    # Annotated Optional because every field defaults to None before the model fills it in.
    logits: Optional[torch.FloatTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None
    distillation_logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
935
+
936
+
937
@add_start_docstrings(
    """
    DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of
    the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.

    .. warning::

           This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet
           supported.
    """,
    DEIT_START_DOCSTRING,
)
class DeiTForImageClassificationWithTeacher(DeiTPreTrainedModel):
    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.deit = DeiTModel(config, add_pooling_layer=False)

        # Classifier heads: one on the [CLS] token, one on the distillation token.
        self.cls_classifier = (
            nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        self.distillation_classifier = (
            nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=DeiTForImageClassificationWithTeacherOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Union[tuple, DeiTForImageClassificationWithTeacherOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deit(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        sequence_output = outputs[0]

        # [CLS] token at position 0, distillation token at position 1.
        cls_logits = self.cls_classifier(sequence_output[:, 0, :])
        distillation_logits = self.distillation_classifier(sequence_output[:, 1, :])

        # during inference, return the average of both classifier predictions
        logits = (cls_logits + distillation_logits) / 2

        if not return_dict:
            output = (logits, cls_logits, distillation_logits) + outputs[1:]
            return output

        return DeiTForImageClassificationWithTeacherOutput(
            logits=logits,
            cls_logits=cls_logits,
            distillation_logits=distillation_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1013
+
1014
+
1015
# Public API of this module.
__all__ = [
    "DeiTForImageClassification",
    "DeiTForImageClassificationWithTeacher",
    "DeiTForMaskedImageModeling",
    "DeiTModel",
    "DeiTPreTrainedModel",
]
janus/lib/python3.10/site-packages/transformers/models/dpr/__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the submodules are
    # loaded lazily on first attribute access via _LazyModule below.
    from .configuration_dpr import *
    from .modeling_dpr import *
    from .modeling_tf_dpr import *
    from .tokenization_dpr import *
    from .tokenization_dpr_fast import *
else:
    import sys

    # Replace this module with a lazy proxy that defers submodule imports.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/longt5/__init__.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the submodules are
    # loaded lazily on first attribute access via _LazyModule below.
    from .configuration_longt5 import *
    from .modeling_flax_longt5 import *
    from .modeling_longt5 import *
else:
    import sys

    # Replace this module with a lazy proxy that defers submodule imports.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (569 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc ADDED
Binary file (6.85 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc ADDED
Binary file (60.1 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc ADDED
Binary file (61.8 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022, The LongT5 Authors and HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """LongT5 model configuration"""
16
+
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxSeq2SeqConfigWithPast
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
class LongT5Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is
    used to instantiate a LongT5 model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5
    [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 32128):
            Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`LongT5Model`].
        d_model (`int`, *optional*, defaults to 512):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
            num_heads`.
        d_ff (`int`, *optional*, defaults to 2048):
            Size of the intermediate feed forward layer in each `LongT5Block`.
        num_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        local_radius (`int`, *optional*, defaults to 127):
            Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism.
        global_block_size (`int`, *optional*, defaults to 16):
            Length of blocks an input sequence is divided into for a global token representation. Used only for
            `encoder_attention_type = "transient-global"`.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the
            `"gated-gelu"` feed forward projection. Original LongT5 implementation uses `"gated-gelu"`.
        encoder_attention_type (`string`, *optional*, defaults to `"local"`):
            Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are
            supported by LongT5 implementation.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    """

    model_type = "longt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
        "head_dim": "d_kv",
    }

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        local_radius=127,
        global_block_size=16,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        encoder_attention_type="local",
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # default = symmetry
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.local_radius = local_radius
        self.global_block_size = global_block_size
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.encoder_attention_type = encoder_attention_type
        self.use_cache = use_cache

        # `feed_forward_proj` is either "<ACT_FN>" or "gated-<ACT_FN>"; split it
        # into the activation name and a gated/non-gated flag.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
153
+
154
+
155
class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the ONNX graph inputs for LongT5."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With cached key/values, the decoder consumes one new token per
            # step, and the past key/value tensors become graph inputs too.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset version used for the export."""
        return 13


__all__ = ["LongT5Config", "LongT5OnnxConfig"]
janus/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py ADDED
The diff for this file is too large to render. See raw diff
 
janus/lib/python3.10/site-packages/transformers/models/longt5/modeling_longt5.py ADDED
The diff for this file is too large to render. See raw diff
 
janus/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py ADDED
@@ -0,0 +1,922 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import importlib.util
17
+ import json
18
+ import os
19
+ import re
20
+ import sys
21
+ import types
22
+
23
+ import torch
24
+ from huggingface_hub import split_torch_state_dict_into_shards
25
+ from packaging import version
26
+
27
+ from transformers import AutoTokenizer, GPT2Config
28
+ from transformers.modeling_utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
29
+
30
+
31
def add_checkpointing_args(parser):
    """Register the arguments shared by both conversion directions and return the parser."""
    parser.add_argument("--megatron-path", type=str, default=None, help="Base directory of Megatron repository")
    parser.add_argument(
        "--convert_checkpoint_from_megatron_to_transformers",
        action="store_true",
        help=(
            "If True, convert a Megatron checkpoint to a Transformers checkpoint. "
            "If False, convert a Transformers checkpoint to a Megatron checkpoint."
        ),
    )
    parser.add_argument("--load_path", type=str, required=True, help="Path to the checkpoint to convert.")
    parser.add_argument("--save_path", type=str, required=True, help="Path to the converted checkpoint.")
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    return parser
55
+
56
+
57
def add_megatron_checkpoint_args(parser):
    """Register the Megatron-side target options (parallel sizes, dtype, vocab padding) and return the parser."""
    parser.add_argument(
        "--target_tensor_model_parallel_size",
        type=int,
        default=1,
        help=(
            "The tensor model parallel size of the converted checkpoint. "
            "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
        ),
    )
    parser.add_argument(
        "--target_pipeline_model_parallel_size",
        type=int,
        default=1,
        help=(
            "The pipeline model parallel size of the converted checkpoint. "
            "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
        ),
    )
    parser.add_argument(
        "--target_data_parallel_size",
        type=int,
        default=1,
        help=(
            "The data parallel size of the converted checkpoint. "
            "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
        ),
    )
    parser.add_argument(
        "--target_params_dtype",
        type=str,
        default="fp32",
        help=(
            "The dtype of the converted checkpoint. "
            "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
        ),
    )
    parser.add_argument(
        "--make_vocab_size_divisible_by",
        type=int,
        default=128,
        help=(
            # FIX: "efficieny" -> "efficiency" in the help text.
            "Pad the vocab size to be divisible by this value. "
            "This is added for computational efficiency reasons. "
            "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
        ),
    )
    parser.add_argument(
        "--use_distributed_optimizer",
        action="store_true",
        help=(
            "If True, use the distributed optimizer. "
            "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
        ),
    )
    return parser
113
+
114
+
115
def add_transformers_checkpoint_args(parser):
    """Register the Transformers-side output options on the parser and return it."""
    # (flag, default, help) triples — both options are plain string arguments.
    options = [
        (
            "--tokenizer_name",
            None,
            "The name of the pre-trained tokenizer to save. "
            "If not None, the tokenizer will be saved. "
            "Only used when converting a Megatron checkpoint to a Transformers checkpoint.",
        ),
        (
            "--max_shard_size",
            "10GB",
            "The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size "
            "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). "
            "Only used when converting a Megatron checkpoint to a Transformers checkpoint.",
        ),
    ]
    for flag, default, help_text in options:
        parser.add_argument(flag, type=str, default=default, help=help_text)

    return parser
138
+
139
+
140
# The simple map of names for "automated" rules.
# Maps a Megatron-LM parameter prefix to the corresponding Transformers-GPT2
# parameter infix (note the leading/trailing dots on the values).
megatron_to_transformers = {
    "attention.dense": ".attn.c_proj.",
    "self_attention.dense": ".attn.c_proj.",
    "mlp.dense_h_to_4h": ".mlp.c_fc.",
    "mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Inverse map (Transformers -> Megatron); `v[1:-1]` strips the surrounding
# dots from the Transformers-side names before inverting.
transformers_to_megatron = {v[1:-1]: k for k, v in megatron_to_transformers.items()}

# Parameters that are sharded across tensor-parallel ranks and therefore must
# be concatenated (Megatron -> Transformers) or split (Transformers -> Megatron).
tensor_parallel_params = [
    # megatron-lm layers to merge across tp ranks
    "self_attention.query_key_value.weight",
    "self_attention.query_key_value.bias",
    "self_attention.dense.weight",
    "mlp.dense_h_to_4h.weight",
    "mlp.dense_h_to_4h.bias",
    "mlp.dense_4h_to_h.weight",
    # deprecated
    "attention.query_key_value.weight",
    "attention.query_key_value.bias",
    "attention.dense.weight",
    # transformers layers to split across tp ranks
    "attn.c_attn.weight",
    "attn.c_attn.bias",
    "attn.c_proj.weight",
    "mlp.c_fc.weight",
    "mlp.c_fc.bias",
    "mlp.c_proj.weight",
]
169
+
170
+
171
def recursive_print(name, val, spaces=0):
    """
    Recursively print the structure of a checkpoint. This function is taken from `convert_megatron_gpt2_checkpoint.py`

    Args:
        name (str): the name of the current tensor parameter
        val (Tuple(int)): the shape of the current tensor parameter
        spaces (int): the number of spaces to print before the output for a nested structure
    """
    # Build the indented label for this entry; the root call passes name=None
    # and prints no label of its own.
    if name is None:
        msg = None
    else:
        msg = ("." * max(0, spaces - 2)) + ("# {:" + str(50 - spaces) + "s}").format(name)

    # Dicts recurse one level deeper, tensors print their shape, and any other
    # leaf value is printed verbatim.
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for key in val:
            recursive_print(key, val[key], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
197
+
198
+
199
def megatron_to_transformers_fix_query_key_value_ordering(
    param, checkpoint_version, num_splits, num_heads, hidden_size
):
    """
    Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] for compatibility with later versions
    of NVIDIA Megatron-LM. The inverse operation is performed inside Megatron-LM to read checkpoints:
    https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 If param is the weight tensor of the
    self-attention block, the returned tensor will have to be transposed one more time to be read by HuggingFace GPT2.
    This function is taken from `convert_megatron_gpt2_checkpoint.py`

    Args:
        param (torch.Tensor): the tensor to permute
        checkpoint_version (int): the version of the checkpoint.
        num_splits (int): the number of projections, usually 3 for (Query, Key, Value)
        num_heads (int): the number of attention heads
        hidden_size (int): the hidden size per head
    """
    original_shape = param.size()
    trailing = original_shape[1:]
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        param = param.view(num_heads, hidden_size, num_splits, *trailing)
        # Reorder to (num_splits, num_heads, hidden_size, ...).
        param = param.permute(2, 0, 1, *range(3, param.dim())).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        param = param.view(num_heads, num_splits, hidden_size, *trailing)
        param = param.transpose(0, 1).contiguous()
    # Any other version (e.g. 0.0) is returned unchanged apart from the no-op view.
    return param.view(*original_shape)
231
+
232
+
233
def transformers_to_megatron_fix_query_key_value_ordering(
    param, checkpoint_version, num_splits, num_heads, hidden_size
):
    """
    Permutes layout of param tensor to the one compatible with respective NVIDIA Megatron-LM checkpoint versions.
    Input is [num_splits * num_heads * hidden_size, :] and output is [num_heads * hidden_size * num_splits, :] for
    version 1.0 and [num_heads * num_splits * hidden_size, :] for version 2.0 and later. If param is the weight tensor
    of the self-attention block, the param needs to be already transposed before calling this function.

    Args:
        param (torch.Tensor): the tensor to permute
        checkpoint_version (int): the version of the checkpoint.
        num_splits (int): the number of projections, usually 3 for (Query, Key, Value)
        num_heads (int): the number of attention heads
        hidden_size (int): the hidden size per head
    """
    # Input is [num_splits * num_heads * hidden_size, :]
    original_shape = param.size()
    trailing = original_shape[1:]
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        param = param.view(num_splits, num_heads, hidden_size, *trailing)
        # Reorder to (hidden_size, num_splits, num_heads, ...).
        param = param.permute(2, 0, 1, *range(3, param.dim())).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        param = param.view(num_splits, num_heads, hidden_size, *trailing)
        param = param.transpose(0, 1).contiguous()
    # Any other version (e.g. 0.0) is returned unchanged apart from the no-op view.
    return param.view(*original_shape)
265
+
266
+
267
def merge_transformers_sharded_states(path, num_checkpoints):
    """
    Merge sharded checkpoints from transformers into a single checkpoint.

    Args:
        path (str): the path to the sharded checkpoints
        num_checkpoints (int): the number of checkpoints to merge
    """
    merged_state_dict = {}
    # Shard files are 1-indexed: pytorch_model-00001-of-0000N.bin, ...
    for shard_idx in range(1, num_checkpoints + 1):
        shard_file = os.path.join(path, f"pytorch_model-{shard_idx:05d}-of-{num_checkpoints:05d}.bin")
        merged_state_dict.update(torch.load(shard_file, map_location="cpu"))
    return merged_state_dict
281
+
282
+
283
def get_megatron_sharded_states(args, tp_size, pp_size, pp_rank):
    """
    Get sharded checkpoints from NVIDIA Megatron-LM checkpoint based on the provided tensor parallel size, pipeline
    parallel size and pipeline parallel rank.

    Args:
        args (argparse.Namespace): the arguments to the script
        tp_size (int): the tensor parallel size
        pp_size (int): the pipeline parallel size
        pp_rank (int): the pipeline parallel rank
    """
    tp_state_dicts = []
    for tp_rank in range(tp_size):
        # Without pipeline parallelism the directory has no pp suffix.
        sub_dir_name = f"mp_rank_{tp_rank:02d}" if pp_size == 1 else f"mp_rank_{tp_rank:02d}_{pp_rank:03d}"
        # Prefer the checkpoint file that includes optimizer/RNG state.
        for checkpoint_name in ["model_optim_rng.pt", "model_rng.pt"]:
            checkpoint_path = os.path.join(args.load_path, sub_dir_name, checkpoint_name)
            if os.path.isfile(checkpoint_path):
                break
        tp_state_dicts.append(torch.load(checkpoint_path, map_location="cpu"))
    return tp_state_dicts
304
+
305
+
306
def get_element_from_dict_by_path(d, path):
    """
    Get element from dictionary by path. If element is not present, recursively add empty dictionaries.

    Args:
        d (dict): the dictionary to get the element from
        path (list): the path to the element which is delimited by "."
    """
    node = d
    for key in path.split("."):
        # setdefault returns the existing value, or inserts (and returns) {}.
        node = node.setdefault(key, {})
    return node
320
+
321
+
322
def convert_checkpoint_from_megatron_to_transformers(args):
    """
    Convert NVIDIA Megatron-LM checkpoint to HuggingFace Transformers checkpoint. This handles Megatron checkpoints
    with different tensor parallelism and pipeline parallelism sizes. It saves the converted checkpoint into shards
    using HuggingFace Transformers checkpoint sharding functionality. This greatly extends the functionality of
    `convert_megatron_gpt2_checkpoint.py`

    Args:
        args (argparse.Namespace): the arguments to the script
    """
    # Load Megatron-LM checkpoint arguments from the state dict
    sub_dirs = os.listdir(args.load_path)
    possible_sub_dirs = ["mp_rank_00", "mp_rank_00_000"]
    for sub_dir in possible_sub_dirs:
        if sub_dir in sub_dirs:
            rank0_checkpoint_name = os.listdir(os.path.join(args.load_path, sub_dir))[0]
            rank0_checkpoint_path = os.path.join(args.load_path, sub_dir, rank0_checkpoint_name)
            break
    else:
        # FIX: the original fell through with `rank0_checkpoint_path` unbound
        # (raising a NameError below) when no rank-0 sub-directory was found.
        raise ValueError(
            f"Could not find a rank-0 checkpoint sub-directory (one of {possible_sub_dirs}) under {args.load_path}"
        )
    print(f"Loading Megatron-LM checkpoint arguments from: {rank0_checkpoint_path}")
    state_dict = torch.load(rank0_checkpoint_path, map_location="cpu")
    megatron_args = state_dict.get("args", None)
    if megatron_args is None:
        raise ValueError(
            "Megatron-LM checkpoint does not contain arguments. This utility only supports Megatron-LM checkpoints"
            " containing all the megatron arguments. This is because it loads all config related to model"
            " architecture, the tensor and pipeline model parallel size from the checkpoint instead of user having to"
            " manually specify all the details. Please save Megatron-LM checkpoint along with all the megatron"
            " arguments to use this utility."
        )

    # Create Transformers GPT2 config from Megatron-LM arguments
    if megatron_args is not None:
        if megatron_args.bias_gelu_fusion:
            activation_function = "gelu_fast"
        elif megatron_args.openai_gelu:
            activation_function = "gelu_new"
        else:
            activation_function = "gelu"
    else:
        # in the very early days this used to be "gelu_new"
        activation_function = "gelu_new"
    # Prefer the original (unpadded) vocab size when the checkpoint records it.
    vocab_size = (
        megatron_args.padded_vocab_size
        if getattr(megatron_args, "orig_vocab_size", None) is None
        else megatron_args.orig_vocab_size
    )
    print(vocab_size)

    config = GPT2Config(
        vocab_size=vocab_size,
        n_positions=megatron_args.max_position_embeddings,
        n_embd=megatron_args.hidden_size,
        n_layer=megatron_args.num_layers,
        n_head=megatron_args.num_attention_heads,
        n_inner=megatron_args.ffn_hidden_size,
        activation_function=activation_function,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=vocab_size - 1,
        eos_token_id=vocab_size - 1,
        architectures=["GPT2LMHeadModel"],
    )

    output_state_dict = {}

    checkpoint_version = state_dict.get("checkpoint_version", 0.0)
    tp_size = megatron_args.tensor_model_parallel_size
    pp_size = megatron_args.pipeline_model_parallel_size
    dtype = torch.float32
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # Convert.
    print("Converting")

    # Embeddings
    print("Converting embeddings")
    tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, 0)

    # Convert and store the position embeddings.
    position_embeddings = get_element_from_dict_by_path(
        tp_state_dicts[0], "model.language_model.embedding.position_embeddings.weight"
    )
    output_state_dict["transformer.wpe.weight"] = position_embeddings.to(dtype)

    # Convert and store the word embeddings: concatenate the vocab shards of all
    # tensor-parallel ranks, then drop any padding rows beyond `vocab_size`.
    word_embeddings = torch.cat(
        [
            get_element_from_dict_by_path(
                tp_state_dicts[tp_rank], "model.language_model.embedding.word_embeddings.weight"
            )
            for tp_rank in range(tp_size)
        ],
        dim=0,
    )
    word_embeddings = word_embeddings[:vocab_size].to(dtype)
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # Transformer Layers
    print("Converting transformer layers")
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    n_positions = config.n_positions
    # Number of layers hosted by each pipeline-parallel rank.
    num_layers = config.num_hidden_layers // pp_size

    for pp_rank in range(pp_size):
        if pp_size > 0:
            print(f"Converting pipeline parallel rank {pp_rank}")
            tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, pp_rank)

        # The transformer.
        path = (
            "model.language_model.transformer"
            if "transformer" in get_element_from_dict_by_path(tp_state_dicts[0], "model.language_model").keys()
            else "model.language_model.encoder"
        )
        # Extract the layers.
        for key, val in get_element_from_dict_by_path(tp_state_dicts[0], path).items():
            # Match the name.
            m = layer_re.match(key)
            # Stop if that's not a layer
            if m is None:
                break

            # The index of the layer.
            layer_idx = int(m.group(1)) + pp_rank * num_layers
            # The name of the operation.
            op_name = m.group(2)
            # Is it a weight or a bias?
            weight_or_bias = m.group(3)

            # The name of the layer.
            layer_name = f"transformer.h.{layer_idx}"

            if op_name + "." + weight_or_bias not in tensor_parallel_params:
                params = val.to(dtype)
            else:
                # Row-parallel params concatenate on dim 1, column-parallel on dim 0.
                dim = 1 if op_name in ["self_attention.dense", "mlp.dense_4h_to_h", "attention.dense"] else 0
                params = torch.cat(
                    [val]
                    + [
                        get_element_from_dict_by_path(tp_state_dicts[tp_rank], f"{path}")[key]
                        for tp_rank in range(1, tp_size)
                    ],
                    dim=dim,
                ).to(dtype)

            # For layernorm(s), simply store the layer norm.
            if op_name.endswith("layernorm"):
                ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
                output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = params

            # Transpose the QKV matrix.
            elif (
                op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
            ) and weight_or_bias == "weight":
                # Insert a tensor of 1x1xDxD bias.
                causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=dtype)).view(
                    1, 1, n_positions, n_positions
                )
                output_state_dict[layer_name + ".attn.bias"] = causal_mask

                # Insert a "dummy" tensor for masked_bias.
                masked_bias = torch.tensor(-1e4, dtype=dtype)
                output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

                out_val = megatron_to_transformers_fix_query_key_value_ordering(
                    params,
                    checkpoint_version,
                    3,
                    heads,
                    hidden_size_per_head,
                )
                # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
                out_val = out_val.transpose(0, 1).contiguous()
                # Store.
                output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

            # Transpose the bias.
            elif (
                op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
            ) and weight_or_bias == "bias":
                out_val = megatron_to_transformers_fix_query_key_value_ordering(
                    params, checkpoint_version, 3, heads, hidden_size_per_head
                )
                # Store. No change of shape.
                output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

            # Transpose the weights.
            elif weight_or_bias == "weight":
                out_name = megatron_to_transformers[op_name]
                output_state_dict[layer_name + out_name + "weight"] = params.transpose(0, 1)

            # Copy the bias.
            elif weight_or_bias == "bias":
                out_name = megatron_to_transformers[op_name]
                output_state_dict[layer_name + out_name + "bias"] = params

    if config.n_layer != (layer_idx + 1):
        raise ValueError(f"Expected {config.n_layer} layers but found {layer_idx + 1}")

    # The final layernorm.
    print("Converting final layernorm")
    params = get_element_from_dict_by_path(tp_state_dicts[0], str(path))
    output_state_dict["transformer.ln_f.weight"] = params["final_layernorm.weight"].to(dtype)
    output_state_dict["transformer.ln_f.bias"] = params["final_layernorm.bias"].to(dtype)

    # For LM head, transformers' wants the matrix to weight embeddings.
    print("Converting LM head")
    output_state_dict["lm_head.weight"] = word_embeddings.to(dtype)

    # It should be done!
    print("Conversion from Megatron-LM to Transformers is done!")

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)

    if args.tokenizer_name is None:
        tokenizer_name = "openai-community/gpt2"
    else:
        tokenizer_name = args.tokenizer_name

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(args.save_path)

    # Save tokenizer based on args
    if args.tokenizer_name is not None:
        print(f"Adding {tokenizer_class} tokenizer files")
        tokenizer.save_pretrained(args.save_path)

    # Store the state_dict to file.
    max_shard_size = int(args.max_shard_size) if args.max_shard_size.isdigit() else args.max_shard_size
    state_dict_split = split_torch_state_dict_into_shards(output_state_dict, max_shard_size=max_shard_size)
    # FIX: build one state dict per shard *file*, reading the tensors from the
    # converted `output_state_dict`. The original loop iterated `.values()`
    # only, rebuilt (and overwrote) `shards` on every iteration keyed by tensor
    # name, and looked tensors up in the Megatron `state_dict`, so at best only
    # the last shard survived and the save loop below misbehaved.
    shards = {
        shard_file: {tensor: output_state_dict[tensor] for tensor in tensors}
        for shard_file, tensors in state_dict_split.filename_to_tensors.items()
    }
    index = None
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }

    # Save the model
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(args.save_path, shard_file))

    if index is None:
        print(f"Model weights saved in {os.path.join(args.save_path, WEIGHTS_NAME)}")
    else:
        save_index_file = os.path.join(args.save_path, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
        print(
            f"The model is bigger than the maximum size per checkpoint ({args.max_shard_size}) and is going to be "
            f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
            f"index located at {save_index_file}."
        )
602
+
603
+
604
+ def convert_checkpoint_from_transformers_to_megatron(args):
605
+ """
606
+ Convert a checkpoint from HuggingFace Transformers to Megatron-LM. This allows converted checkpoints with variable
607
+ tensor parallelism and pipeline parallelism sizes. It takes as input a checkpoint from HuggingFace Transformers
608
+ which can have multiple shards.
609
+
610
+ Args:
611
+ args (argparse.Namespace): the arguments to the script
612
+
613
+ """
614
+ os.makedirs(args.save_path, exist_ok=True)
615
+ # Search in directory above this
616
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
617
+ if args.megatron_path is not None:
618
+ sys.path.insert(0, args.megatron_path)
619
+
620
+ megatron_exists = importlib.util.find_spec("megatron") is not None
621
+ if megatron_exists:
622
+ from megatron.core import package_info
623
+
624
+ if version.parse(package_info.__version__) >= version.parse("0.6.0"):
625
+ from megatron.training.tokenizer.tokenizer import _vocab_size_with_padding
626
+ else:
627
+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding
628
+
629
+ else:
630
+ print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting.")
631
+ exit(1)
632
+
633
+ # load the transformers model state dict and config
634
+ sub_dirs = [x for x in os.listdir(args.load_path) if x.startswith("pytorch_model")]
635
+ if len(sub_dirs) == 1:
636
+ checkpoint_name = "pytorch_model.bin"
637
+ state_dict = torch.load(os.path.join(args.load_path, checkpoint_name), map_location="cpu")
638
+ else:
639
+ num_checkpoints = len(sub_dirs) - 1
640
+ state_dict = merge_transformers_sharded_states(args.load_path, num_checkpoints)
641
+
642
+ config = GPT2Config.from_pretrained(args.load_path)
643
+
644
+ # Saving the tracker file
645
+ tracker_filepath = os.path.join(args.save_path, "latest_checkpointed_iteration.txt")
646
+ with open(tracker_filepath, "w") as f:
647
+ f.write("release")
648
+
649
+ # create `release` dir in args.load_path
650
+ release_dir = os.path.join(args.save_path, "release")
651
+ os.makedirs(release_dir, exist_ok=True)
652
+
653
+ # megatron args
654
+ megatron_args = {
655
+ "orig_vocab_size": config.vocab_size,
656
+ "max_position_embeddings": config.n_positions,
657
+ "hidden_size": config.n_embd,
658
+ "num_layers": config.n_layer,
659
+ "num_attention_heads": config.n_head,
660
+ "ffn_hidden_size": config.n_inner,
661
+ "tensor_model_parallel_size": args.target_tensor_model_parallel_size,
662
+ "pipeline_model_parallel_size": args.target_pipeline_model_parallel_size,
663
+ "data_parallel_size": args.target_data_parallel_size,
664
+ "make_vocab_size_divisible_by": args.make_vocab_size_divisible_by,
665
+ "rank": 0,
666
+ "tokenizer_type": "GPT2BPETokenizer",
667
+ }
668
+
669
+ if config.activation_function == "gelu":
670
+ megatron_args["bias_gelu_fusion"] = False
671
+ megatron_args["openai_gelu"] = False
672
+ elif config.activation_function == "gelu_fast":
673
+ megatron_args["bias_gelu_fusion"] = True
674
+ megatron_args["openai_gelu"] = False
675
+ elif config.activation_function == "gelu_new":
676
+ megatron_args["bias_gelu_fusion"] = False
677
+ megatron_args["openai_gelu"] = True
678
+
679
+ margs = types.SimpleNamespace()
680
+ for k, v in megatron_args.items():
681
+ setattr(margs, k, v)
682
+
683
+ # params dtype
684
+ if args.target_params_dtype == "fp16":
685
+ dtype = torch.float16
686
+ elif args.target_params_dtype == "bf16":
687
+ dtype = torch.bfloat16
688
+ else:
689
+ dtype = torch.float32
690
+ setattr(margs, "params_dtype", dtype)
691
+
692
+ # save dummy optim state dict
693
+ dummy_optim_state_dict = {}
694
+ dummy_optim_state_dict["optimizer"] = {
695
+ "step": 0,
696
+ "param_groups": [
697
+ {
698
+ "lr": 0.0,
699
+ "beta1": 0.0,
700
+ "beta2": 0.0,
701
+ "eps": 0.0,
702
+ "weight_decay": 0.0,
703
+ "correct_bias": False,
704
+ "params": [],
705
+ }
706
+ ],
707
+ }
708
+ if args.use_distributed_optimizer:
709
+ for i in range(args.target_pipeline_model_parallel_size):
710
+ for j in range(args.target_tensor_model_parallel_size):
711
+ for k in range(args.target_data_parallel_size):
712
+ if args.target_pipeline_model_parallel_size == 1:
713
+ checkpoint_dir = f"mp_rank_{j:02d}_{k:03d}"
714
+ else:
715
+ checkpoint_dir = f"mp_rank_{j:02d}_{i:03d}_{k:03d}"
716
+ checkpoint_dir = os.path.join(release_dir, checkpoint_dir)
717
+ os.makedirs(checkpoint_dir, exist_ok=True)
718
+ torch.save(
719
+ dummy_optim_state_dict,
720
+ os.path.join(checkpoint_dir, "optim.pt"),
721
+ )
722
+
723
+ # Convert.
724
+ print("Converting")
725
+ output_state_dict = []
726
+ for i in range(args.target_tensor_model_parallel_size):
727
+ output_state_dict.append({})
728
+
729
+ # Embedding layer
730
+ print("converting embedding layer")
731
+ pos_embedding = state_dict["transformer.wpe.weight"].to(dtype)
732
+ word_embedding = state_dict["transformer.wte.weight"].to(dtype)
733
+ orig_vocab_size = config.vocab_size
734
+ padded_vocab_size = _vocab_size_with_padding(orig_vocab_size, margs)
735
+ setattr(margs, "padded_vocab_size", padded_vocab_size)
736
+ # Cut out extra padding we don't need
737
+ if orig_vocab_size > padded_vocab_size:
738
+ full_word_embed = word_embedding[0:padded_vocab_size, :]
739
+ # Expanding embedding to larger size by replicating final entry
740
+ elif orig_vocab_size < padded_vocab_size:
741
+ padding_size = padded_vocab_size - orig_vocab_size
742
+ full_word_embed = torch.cat((word_embedding, word_embedding[-1].unsqueeze(0).expand(padding_size, -1)))
743
+ # Same size!
744
+ else:
745
+ full_word_embed = word_embedding
746
+
747
+ # Split into new tensor model parallel sizes
748
+ out_word_embed = torch.chunk(full_word_embed, args.target_tensor_model_parallel_size, dim=0)
749
+ for i in range(args.target_tensor_model_parallel_size):
750
+ pos_emb_dict = get_element_from_dict_by_path(
751
+ output_state_dict[i], "model.language_model.embedding.position_embeddings"
752
+ )
753
+ pos_emb_dict["weight"] = pos_embedding
754
+
755
+ word_emb_dict = get_element_from_dict_by_path(
756
+ output_state_dict[i], "model.language_model.embedding.word_embeddings"
757
+ )
758
+ word_emb_dict["weight"] = out_word_embed[i].clone()
759
+
760
+ # Transformer layers
761
+ print("converting transformer layers")
762
+ if config.num_attention_heads % args.target_tensor_model_parallel_size != 0:
763
+ raise ValueError(
764
+ f"Number of attention heads ({config.num_attention_heads}) must be divisible by number of tensor parallelism"
765
+ f" ({args.target_tensor_model_parallel_size})"
766
+ )
767
+
768
+ if config.num_hidden_layers % args.target_pipeline_model_parallel_size != 0:
769
+ raise ValueError(
770
+ f"Number of layers ({config.num_hidden_layers}) must be divisible by number of pipeline parallelism"
771
+ f" ({args.target_pipeline_model_parallel_size})"
772
+ )
773
+
774
+ num_layers = config.num_hidden_layers // args.target_pipeline_model_parallel_size
775
+
776
+ layer_re = re.compile(r"transformer.h\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
777
+ # The number of heads.
778
+ heads = config.n_head
779
+ # The hidden_size per head.
780
+ hidden_size_per_head = config.n_embd // config.n_head
781
+ for pp_rank in range(args.target_pipeline_model_parallel_size):
782
+ layer_offset = pp_rank * num_layers
783
+ if pp_rank > 0:
784
+ output_state_dict = []
785
+ for i in range(args.target_tensor_model_parallel_size):
786
+ output_state_dict.append({})
787
+
788
+ for layer in range(num_layers):
789
+ pp_layer_id = layer + layer_offset
790
+ layers_to_copy = [
791
+ layer_name
792
+ for layer_name in state_dict.keys()
793
+ if layer_name.startswith(f"transformer.h.{pp_layer_id}.")
794
+ ]
795
+
796
+ for layer_name in layers_to_copy:
797
+ m = layer_re.match(layer_name)
798
+ # Stop if that's not a layer
799
+ if m is None:
800
+ break
801
+
802
+ # The index of the layer.
803
+ _ = int(m.group(1))
804
+ # The name of the operation.
805
+ op_name = m.group(2)
806
+ # Is it a weight or a bias?
807
+ weight_or_bias = m.group(3)
808
+
809
+ params = state_dict[layer_name].to(dtype)
810
+ # handle layernorm
811
+ if op_name.startswith("ln"):
812
+ out_name = "input_layernorm" if op_name.endswith("1") else "post_attention_layernorm"
813
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
814
+
815
+ # handle attention K, V, Q weights
816
+ elif op_name.startswith("attn.c_attn") and weight_or_bias == "weight":
817
+ # transformers stores D X (3*D) but Megatron-LM expects (3*D) X D.
818
+ params = params.transpose(0, 1).contiguous()
819
+
820
+ params = transformers_to_megatron_fix_query_key_value_ordering(
821
+ params,
822
+ 3.0,
823
+ 3,
824
+ heads,
825
+ hidden_size_per_head,
826
+ )
827
+ layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}"
828
+
829
+ # handle attention K, V, Q bias
830
+ elif op_name.startswith("attn.c_attn") and weight_or_bias == "bias":
831
+ params = transformers_to_megatron_fix_query_key_value_ordering(
832
+ params,
833
+ 3.0,
834
+ 3,
835
+ heads,
836
+ hidden_size_per_head,
837
+ )
838
+ layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}"
839
+
840
+ # handle attention and mlp weights
841
+ elif weight_or_bias == "weight":
842
+ out_name = transformers_to_megatron.get(op_name, None)
843
+ if out_name is None:
844
+ continue
845
+ params = params.transpose(0, 1)
846
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
847
+
848
+ # handle attention and mlp bias
849
+ elif weight_or_bias == "bias":
850
+ out_name = transformers_to_megatron.get(op_name, None)
851
+ if out_name is None:
852
+ continue
853
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
854
+
855
+ # skip
856
+ else:
857
+ continue
858
+
859
+ if op_name + "." + weight_or_bias in tensor_parallel_params:
860
+ dim = 1 if op_name in ["attn.c_proj", "mlp.c_proj"] else 0
861
+ params = torch.chunk(params, args.target_tensor_model_parallel_size, dim=dim)
862
+
863
+ for i in range(args.target_tensor_model_parallel_size):
864
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder")
865
+ params_dict[layer_name] = (
866
+ params[i].clone() if (op_name + "." + weight_or_bias in tensor_parallel_params) else params
867
+ )
868
+
869
+ if pp_rank == args.target_pipeline_model_parallel_size - 1:
870
+ # handle final layernorm
871
+ for weight_or_bias in ["weight", "bias"]:
872
+ params = state_dict[f"transformer.ln_f.{weight_or_bias}"].to(dtype)
873
+ layer_name = f"final_layernorm.{weight_or_bias}"
874
+ for i in range(args.target_tensor_model_parallel_size):
875
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder")
876
+ params_dict[layer_name] = params
877
+
878
+ # add the LM head
879
+ for i in range(args.target_tensor_model_parallel_size):
880
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.word_embeddings_for_head")
881
+ params_dict["weight"] = out_word_embed[i].clone()
882
+
883
+ # saving the state dict as per the tp_rank and pp_rank
884
+ for tp_rank in range(args.target_tensor_model_parallel_size):
885
+ output_state_dict[tp_rank]["checkpoint_version"] = 3.0
886
+ output_state_dict[tp_rank]["args"] = margs
887
+ checkpoint_dir = (
888
+ f"mp_rank_{tp_rank:02d}"
889
+ if args.target_pipeline_model_parallel_size == 1
890
+ else f"mp_rank_{tp_rank:02d}_{pp_rank:03d}"
891
+ )
892
+ if args.use_distributed_optimizer:
893
+ checkpoint_name = "model_rng.pt"
894
+ else:
895
+ checkpoint_name = "model_optim_rng.pt"
896
+ output_state_dict[tp_rank]["optimizer"] = dummy_optim_state_dict["optimizer"]
897
+ checkpoint_dir = os.path.join(release_dir, checkpoint_dir)
898
+ os.makedirs(checkpoint_dir, exist_ok=True)
899
+ checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)
900
+ if args.print_checkpoint_structure:
901
+ print(
902
+ f"Checkpoint structure of model state dict shard belonging to TP rank {tp_rank} and PP rank"
903
+ f" {pp_rank}:"
904
+ )
905
+ recursive_print(None, output_state_dict[tp_rank])
906
+ torch.save(output_state_dict[tp_rank], checkpoint_path)
907
+
908
+
909
+ def main():
910
+ parser = argparse.ArgumentParser()
911
+ parser = add_checkpointing_args(parser)
912
+ parser = add_megatron_checkpoint_args(parser)
913
+ parser = add_transformers_checkpoint_args(parser)
914
+ args = parser.parse_args()
915
+ if args.convert_checkpoint_from_megatron_to_transformers:
916
+ convert_checkpoint_from_megatron_to_transformers(args)
917
+ else:
918
+ convert_checkpoint_from_transformers_to_megatron(args)
919
+
920
+
921
+ if __name__ == "__main__":
922
+ main()
janus/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (552 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/modeling_mobilevitv2.cpython-310.pyc ADDED
Binary file (26.2 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/mobilevitv2/modeling_mobilevitv2.py ADDED
@@ -0,0 +1,1035 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ # Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE
17
+ """PyTorch MobileViTV2 model."""
18
+
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutputWithNoAttention,
29
+ BaseModelOutputWithPoolingAndNoAttention,
30
+ ImageClassifierOutputWithNoAttention,
31
+ SemanticSegmenterOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...utils import (
35
+ add_code_sample_docstrings,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from .configuration_mobilevitv2 import MobileViTV2Config
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ # General docstring
48
+ _CONFIG_FOR_DOC = "MobileViTV2Config"
49
+
50
+ # Base docstring
51
+ _CHECKPOINT_FOR_DOC = "apple/mobilevitv2-1.0-imagenet1k-256"
52
+ _EXPECTED_OUTPUT_SHAPE = [1, 512, 8, 8]
53
+
54
+ # Image classification docstring
55
+ _IMAGE_CLASS_CHECKPOINT = "apple/mobilevitv2-1.0-imagenet1k-256"
56
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
57
+
58
+
59
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.make_divisible
60
+ def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
61
+ """
62
+ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
63
+ original TensorFlow repo. It can be seen here:
64
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
65
+ """
66
+ if min_value is None:
67
+ min_value = divisor
68
+ new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
69
+ # Make sure that round down does not go down by more than 10%.
70
+ if new_value < 0.9 * value:
71
+ new_value += divisor
72
+ return int(new_value)
73
+
74
+
75
+ def clip(value: float, min_val: float = float("-inf"), max_val: float = float("inf")) -> float:
76
+ return max(min_val, min(max_val, value))
77
+
78
+
79
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTConvLayer with MobileViT->MobileViTV2
80
+ class MobileViTV2ConvLayer(nn.Module):
81
+ def __init__(
82
+ self,
83
+ config: MobileViTV2Config,
84
+ in_channels: int,
85
+ out_channels: int,
86
+ kernel_size: int,
87
+ stride: int = 1,
88
+ groups: int = 1,
89
+ bias: bool = False,
90
+ dilation: int = 1,
91
+ use_normalization: bool = True,
92
+ use_activation: Union[bool, str] = True,
93
+ ) -> None:
94
+ super().__init__()
95
+ padding = int((kernel_size - 1) / 2) * dilation
96
+
97
+ if in_channels % groups != 0:
98
+ raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
99
+ if out_channels % groups != 0:
100
+ raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
101
+
102
+ self.convolution = nn.Conv2d(
103
+ in_channels=in_channels,
104
+ out_channels=out_channels,
105
+ kernel_size=kernel_size,
106
+ stride=stride,
107
+ padding=padding,
108
+ dilation=dilation,
109
+ groups=groups,
110
+ bias=bias,
111
+ padding_mode="zeros",
112
+ )
113
+
114
+ if use_normalization:
115
+ self.normalization = nn.BatchNorm2d(
116
+ num_features=out_channels,
117
+ eps=1e-5,
118
+ momentum=0.1,
119
+ affine=True,
120
+ track_running_stats=True,
121
+ )
122
+ else:
123
+ self.normalization = None
124
+
125
+ if use_activation:
126
+ if isinstance(use_activation, str):
127
+ self.activation = ACT2FN[use_activation]
128
+ elif isinstance(config.hidden_act, str):
129
+ self.activation = ACT2FN[config.hidden_act]
130
+ else:
131
+ self.activation = config.hidden_act
132
+ else:
133
+ self.activation = None
134
+
135
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
136
+ features = self.convolution(features)
137
+ if self.normalization is not None:
138
+ features = self.normalization(features)
139
+ if self.activation is not None:
140
+ features = self.activation(features)
141
+ return features
142
+
143
+
144
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTInvertedResidual with MobileViT->MobileViTV2
145
+ class MobileViTV2InvertedResidual(nn.Module):
146
+ """
147
+ Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381
148
+ """
149
+
150
+ def __init__(
151
+ self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1
152
+ ) -> None:
153
+ super().__init__()
154
+ expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
155
+
156
+ if stride not in [1, 2]:
157
+ raise ValueError(f"Invalid stride {stride}.")
158
+
159
+ self.use_residual = (stride == 1) and (in_channels == out_channels)
160
+
161
+ self.expand_1x1 = MobileViTV2ConvLayer(
162
+ config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
163
+ )
164
+
165
+ self.conv_3x3 = MobileViTV2ConvLayer(
166
+ config,
167
+ in_channels=expanded_channels,
168
+ out_channels=expanded_channels,
169
+ kernel_size=3,
170
+ stride=stride,
171
+ groups=expanded_channels,
172
+ dilation=dilation,
173
+ )
174
+
175
+ self.reduce_1x1 = MobileViTV2ConvLayer(
176
+ config,
177
+ in_channels=expanded_channels,
178
+ out_channels=out_channels,
179
+ kernel_size=1,
180
+ use_activation=False,
181
+ )
182
+
183
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
184
+ residual = features
185
+
186
+ features = self.expand_1x1(features)
187
+ features = self.conv_3x3(features)
188
+ features = self.reduce_1x1(features)
189
+
190
+ return residual + features if self.use_residual else features
191
+
192
+
193
+ # Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTMobileNetLayer with MobileViT->MobileViTV2
194
+ class MobileViTV2MobileNetLayer(nn.Module):
195
+ def __init__(
196
+ self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
197
+ ) -> None:
198
+ super().__init__()
199
+
200
+ self.layer = nn.ModuleList()
201
+ for i in range(num_stages):
202
+ layer = MobileViTV2InvertedResidual(
203
+ config,
204
+ in_channels=in_channels,
205
+ out_channels=out_channels,
206
+ stride=stride if i == 0 else 1,
207
+ )
208
+ self.layer.append(layer)
209
+ in_channels = out_channels
210
+
211
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
212
+ for layer_module in self.layer:
213
+ features = layer_module(features)
214
+ return features
215
+
216
+
217
+ class MobileViTV2LinearSelfAttention(nn.Module):
218
+ """
219
+ This layer applies a self-attention with linear complexity, as described in MobileViTV2 paper:
220
+ https://arxiv.org/abs/2206.02680
221
+
222
+ Args:
223
+ config (`MobileVitv2Config`):
224
+ Model configuration object
225
+ embed_dim (`int`):
226
+ `input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)`
227
+ """
228
+
229
+ def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None:
230
+ super().__init__()
231
+
232
+ self.qkv_proj = MobileViTV2ConvLayer(
233
+ config=config,
234
+ in_channels=embed_dim,
235
+ out_channels=1 + (2 * embed_dim),
236
+ bias=True,
237
+ kernel_size=1,
238
+ use_normalization=False,
239
+ use_activation=False,
240
+ )
241
+
242
+ self.attn_dropout = nn.Dropout(p=config.attn_dropout)
243
+ self.out_proj = MobileViTV2ConvLayer(
244
+ config=config,
245
+ in_channels=embed_dim,
246
+ out_channels=embed_dim,
247
+ bias=True,
248
+ kernel_size=1,
249
+ use_normalization=False,
250
+ use_activation=False,
251
+ )
252
+ self.embed_dim = embed_dim
253
+
254
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
255
+ # (batch_size, embed_dim, num_pixels_in_patch, num_patches) --> (batch_size, 1+2*embed_dim, num_pixels_in_patch, num_patches)
256
+ qkv = self.qkv_proj(hidden_states)
257
+
258
+ # Project hidden_states into query, key and value
259
+ # Query --> [batch_size, 1, num_pixels_in_patch, num_patches]
260
+ # value, key --> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
261
+ query, key, value = torch.split(qkv, split_size_or_sections=[1, self.embed_dim, self.embed_dim], dim=1)
262
+
263
+ # apply softmax along num_patches dimension
264
+ context_scores = torch.nn.functional.softmax(query, dim=-1)
265
+ context_scores = self.attn_dropout(context_scores)
266
+
267
+ # Compute context vector
268
+ # [batch_size, embed_dim, num_pixels_in_patch, num_patches] x [batch_size, 1, num_pixels_in_patch, num_patches] -> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
269
+ context_vector = key * context_scores
270
+ # [batch_size, embed_dim, num_pixels_in_patch, num_patches] --> [batch_size, embed_dim, num_pixels_in_patch, 1]
271
+ context_vector = torch.sum(context_vector, dim=-1, keepdim=True)
272
+
273
+ # combine context vector with values
274
+ # [batch_size, embed_dim, num_pixels_in_patch, num_patches] * [batch_size, embed_dim, num_pixels_in_patch, 1] --> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
275
+ out = torch.nn.functional.relu(value) * context_vector.expand_as(value)
276
+ out = self.out_proj(out)
277
+ return out
278
+
279
+
280
+ class MobileViTV2FFN(nn.Module):
281
+ def __init__(
282
+ self,
283
+ config: MobileViTV2Config,
284
+ embed_dim: int,
285
+ ffn_latent_dim: int,
286
+ ffn_dropout: float = 0.0,
287
+ ) -> None:
288
+ super().__init__()
289
+ self.conv1 = MobileViTV2ConvLayer(
290
+ config=config,
291
+ in_channels=embed_dim,
292
+ out_channels=ffn_latent_dim,
293
+ kernel_size=1,
294
+ stride=1,
295
+ bias=True,
296
+ use_normalization=False,
297
+ use_activation=True,
298
+ )
299
+ self.dropout1 = nn.Dropout(ffn_dropout)
300
+
301
+ self.conv2 = MobileViTV2ConvLayer(
302
+ config=config,
303
+ in_channels=ffn_latent_dim,
304
+ out_channels=embed_dim,
305
+ kernel_size=1,
306
+ stride=1,
307
+ bias=True,
308
+ use_normalization=False,
309
+ use_activation=False,
310
+ )
311
+ self.dropout2 = nn.Dropout(ffn_dropout)
312
+
313
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
314
+ hidden_states = self.conv1(hidden_states)
315
+ hidden_states = self.dropout1(hidden_states)
316
+ hidden_states = self.conv2(hidden_states)
317
+ hidden_states = self.dropout2(hidden_states)
318
+ return hidden_states
319
+
320
+
321
+ class MobileViTV2TransformerLayer(nn.Module):
322
+ def __init__(
323
+ self,
324
+ config: MobileViTV2Config,
325
+ embed_dim: int,
326
+ ffn_latent_dim: int,
327
+ dropout: float = 0.0,
328
+ ) -> None:
329
+ super().__init__()
330
+ self.layernorm_before = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
331
+ self.attention = MobileViTV2LinearSelfAttention(config, embed_dim)
332
+ self.dropout1 = nn.Dropout(p=dropout)
333
+ self.layernorm_after = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
334
+ self.ffn = MobileViTV2FFN(config, embed_dim, ffn_latent_dim, config.ffn_dropout)
335
+
336
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
337
+ layernorm_1_out = self.layernorm_before(hidden_states)
338
+ attention_output = self.attention(layernorm_1_out)
339
+ hidden_states = attention_output + hidden_states
340
+
341
+ layer_output = self.layernorm_after(hidden_states)
342
+ layer_output = self.ffn(layer_output)
343
+
344
+ layer_output = layer_output + hidden_states
345
+ return layer_output
346
+
347
+
348
+ class MobileViTV2Transformer(nn.Module):
349
+ def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None:
350
+ super().__init__()
351
+
352
+ ffn_multiplier = config.ffn_multiplier
353
+
354
+ ffn_dims = [ffn_multiplier * d_model] * n_layers
355
+
356
+ # ensure that dims are multiple of 16
357
+ ffn_dims = [int((d // 16) * 16) for d in ffn_dims]
358
+
359
+ self.layer = nn.ModuleList()
360
+ for block_idx in range(n_layers):
361
+ transformer_layer = MobileViTV2TransformerLayer(
362
+ config, embed_dim=d_model, ffn_latent_dim=ffn_dims[block_idx]
363
+ )
364
+ self.layer.append(transformer_layer)
365
+
366
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
367
+ for layer_module in self.layer:
368
+ hidden_states = layer_module(hidden_states)
369
+ return hidden_states
370
+
371
+
372
class MobileViTV2Layer(nn.Module):
    """
    MobileViTV2 layer: https://arxiv.org/abs/2206.02680

    Optionally downsamples its input, computes a local representation with a depthwise
    k x k convolution plus a linear 1x1 projection, runs a transformer over
    non-overlapping patches for the global representation, and projects the result
    back to the (post-downsampling) input channel count.
    """

    def __init__(
        self,
        config: MobileViTV2Config,
        in_channels: int,
        out_channels: int,
        attn_unit_dim: int,
        n_attn_blocks: int = 2,
        dilation: int = 1,
        stride: int = 2,
    ) -> None:
        super().__init__()
        # Square patches of side `config.patch_size`; used by unfolding()/folding().
        self.patch_width = config.patch_size
        self.patch_height = config.patch_size

        cnn_out_dim = attn_unit_dim

        if stride == 2:
            # Downsample with an inverted-residual block. When dilation > 1
            # (segmentation backbones), the stride is dropped in favor of dilation.
            self.downsampling_layer = MobileViTV2InvertedResidual(
                config,
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride if dilation == 1 else 1,
                dilation=dilation // 2 if dilation > 1 else 1,
            )
            in_channels = out_channels
        else:
            self.downsampling_layer = None

        # Local representations
        # Depthwise conv (groups == in_channels) followed by a plain linear projection
        # (no normalization / activation) into the attention dimension.
        self.conv_kxk = MobileViTV2ConvLayer(
            config,
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=config.conv_kernel_size,
            groups=in_channels,
        )
        self.conv_1x1 = MobileViTV2ConvLayer(
            config,
            in_channels=in_channels,
            out_channels=cnn_out_dim,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
        )

        # Global representations
        self.transformer = MobileViTV2Transformer(config, d_model=attn_unit_dim, n_layers=n_attn_blocks)

        # self.layernorm = MobileViTV2LayerNorm2D(attn_unit_dim, eps=config.layer_norm_eps)
        # GroupNorm with a single group normalizes over all channels (and remaining
        # dims), standing in for the commented-out 2D layer norm above.
        self.layernorm = nn.GroupNorm(num_groups=1, num_channels=attn_unit_dim, eps=config.layer_norm_eps)

        # Fusion
        self.conv_projection = MobileViTV2ConvLayer(
            config,
            in_channels=cnn_out_dim,
            out_channels=in_channels,
            kernel_size=1,
            use_normalization=True,
            use_activation=False,
        )

    def unfolding(self, feature_map: torch.Tensor) -> Tuple[torch.Tensor, Tuple[int, int]]:
        """Split a feature map into non-overlapping patches.

        (batch, channels, H, W) -> (batch, channels, patch_area, n_patches);
        also returns (H, W) so `folding` can reconstruct the original size.
        """
        batch_size, in_channels, img_height, img_width = feature_map.shape
        # stride == kernel_size makes the patches non-overlapping.
        patches = nn.functional.unfold(
            feature_map,
            kernel_size=(self.patch_height, self.patch_width),
            stride=(self.patch_height, self.patch_width),
        )
        patches = patches.reshape(batch_size, in_channels, self.patch_height * self.patch_width, -1)

        return patches, (img_height, img_width)

    def folding(self, patches: torch.Tensor, output_size: Tuple[int, int]) -> torch.Tensor:
        """Inverse of `unfolding`: reassemble patches into a (batch, channels, H, W) map."""
        batch_size, in_dim, patch_size, n_patches = patches.shape
        patches = patches.reshape(batch_size, in_dim * patch_size, n_patches)

        feature_map = nn.functional.fold(
            patches,
            output_size=output_size,
            kernel_size=(self.patch_height, self.patch_width),
            stride=(self.patch_height, self.patch_width),
        )

        return feature_map

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        # reduce spatial dimensions if needed
        if self.downsampling_layer:
            features = self.downsampling_layer(features)

        # local representation
        features = self.conv_kxk(features)
        features = self.conv_1x1(features)

        # convert feature map to patches
        patches, output_size = self.unfolding(features)

        # learn global representations
        patches = self.transformer(patches)
        patches = self.layernorm(patches)

        # convert patches back to feature maps
        # [batch_size, patch_height, patch_width, input_dim] --> [batch_size, input_dim, patch_height, patch_width]
        features = self.folding(patches, output_size)

        features = self.conv_projection(features)
        return features
484
+
485
+
486
class MobileViTV2Encoder(nn.Module):
    """
    Five-stage MobileViTV2 backbone: two MobileNet-style convolutional stages
    followed by three MobileViTV2 (conv + transformer) stages of increasing width.
    """

    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__()
        self.config = config

        self.layer = nn.ModuleList()
        self.gradient_checkpointing = False

        # segmentation architectures like DeepLab and PSPNet modify the strides
        # of the classification backbones
        dilate_layer_4 = dilate_layer_5 = False
        if config.output_stride == 8:
            dilate_layer_4 = True
            dilate_layer_5 = True
        elif config.output_stride == 16:
            dilate_layer_5 = True

        dilation = 1

        # Stage widths scale with `width_multiplier`, rounded to hardware-friendly
        # multiples by make_divisible; layer 0 is additionally clipped to [16, 64].
        layer_0_dim = make_divisible(
            clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16
        )

        layer_1_dim = make_divisible(64 * config.width_multiplier, divisor=16)
        layer_2_dim = make_divisible(128 * config.width_multiplier, divisor=8)
        layer_3_dim = make_divisible(256 * config.width_multiplier, divisor=8)
        layer_4_dim = make_divisible(384 * config.width_multiplier, divisor=8)
        layer_5_dim = make_divisible(512 * config.width_multiplier, divisor=8)

        # Stages 1-2: purely convolutional (MobileNet-style inverted residuals).
        layer_1 = MobileViTV2MobileNetLayer(
            config,
            in_channels=layer_0_dim,
            out_channels=layer_1_dim,
            stride=1,
            num_stages=1,
        )
        self.layer.append(layer_1)

        layer_2 = MobileViTV2MobileNetLayer(
            config,
            in_channels=layer_1_dim,
            out_channels=layer_2_dim,
            stride=2,
            num_stages=2,
        )
        self.layer.append(layer_2)

        # Stages 3-5: MobileViTV2 layers mixing convolutions and transformers.
        layer_3 = MobileViTV2Layer(
            config,
            in_channels=layer_2_dim,
            out_channels=layer_3_dim,
            attn_unit_dim=make_divisible(config.base_attn_unit_dims[0] * config.width_multiplier, divisor=8),
            n_attn_blocks=config.n_attn_blocks[0],
        )
        self.layer.append(layer_3)

        # Dilation doubles before each dilated stage so later layers keep resolution.
        if dilate_layer_4:
            dilation *= 2

        layer_4 = MobileViTV2Layer(
            config,
            in_channels=layer_3_dim,
            out_channels=layer_4_dim,
            attn_unit_dim=make_divisible(config.base_attn_unit_dims[1] * config.width_multiplier, divisor=8),
            n_attn_blocks=config.n_attn_blocks[1],
            dilation=dilation,
        )
        self.layer.append(layer_4)

        if dilate_layer_5:
            dilation *= 2

        layer_5 = MobileViTV2Layer(
            config,
            in_channels=layer_4_dim,
            out_channels=layer_5_dim,
            attn_unit_dim=make_divisible(config.base_attn_unit_dims[2] * config.width_multiplier, divisor=8),
            n_attn_blocks=config.n_attn_blocks[2],
            dilation=dilation,
        )
        self.layer.append(layer_5)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutputWithNoAttention]:
        """Run all stages; optionally collect every stage's output feature map."""
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            if self.gradient_checkpointing and self.training:
                # Trade compute for memory during training when checkpointing is on.
                hidden_states = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                )
            else:
                hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
592
+
593
+
594
class MobileViTV2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileViTV2Config
    base_model_prefix = "mobilevitv2"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MobileViTV2Layer"]

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
618
+
619
+
620
# Class-level docstring injected by the `add_start_docstrings` decorators below.
MOBILEVITV2_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileViTV2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Shared `forward` argument documentation, injected via
# `add_start_docstrings_to_model_forward` on each model's forward method.
MOBILEVITV2_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileViTImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
642
+
643
+
644
@add_start_docstrings(
    "The bare MobileViTV2 model outputting raw hidden-states without any specific head on top.",
    MOBILEVITV2_START_DOCSTRING,
)
class MobileViTV2Model(MobileViTV2PreTrainedModel):
    # `expand_output=True` adds global average pooling over the last feature map;
    # segmentation heads construct this model with `expand_output=False`.
    def __init__(self, config: MobileViTV2Config, expand_output: bool = True):
        super().__init__(config)
        self.config = config
        self.expand_output = expand_output

        layer_0_dim = make_divisible(
            clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16
        )

        # Stem: strided 3x3 conv halving the spatial resolution.
        self.conv_stem = MobileViTV2ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=layer_0_dim,
            kernel_size=3,
            stride=2,
            use_normalization=True,
            use_activation=True,
        )
        self.encoder = MobileViTV2Encoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
        """
        for layer_index, heads in heads_to_prune.items():
            mobilevitv2_layer = self.encoder.layer[layer_index]
            # Only transformer-bearing stages have attention heads to prune.
            if isinstance(mobilevitv2_layer, MobileViTV2Layer):
                for transformer_layer in mobilevitv2_layer.transformer.layer:
                    transformer_layer.attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.conv_stem(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.expand_output:
            last_hidden_state = encoder_outputs[0]

            # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
            pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
        else:
            last_hidden_state = encoder_outputs[0]
            pooled_output = None

        if not return_dict:
            output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
            return output + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
730
+
731
+
732
@add_start_docstrings(
    """
    MobileViTV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILEVITV2_START_DOCSTRING,
)
class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel):
    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilevitv2 = MobileViTV2Model(config)

        out_channels = make_divisible(512 * config.width_multiplier, divisor=8)  # layer 5 output dimension
        # Classifier head
        # Identity when num_labels == 0, so the model can serve as a feature extractor.
        self.classifier = (
            nn.Linear(in_features=out_channels, out_features=config.num_labels)
            if config.num_labels > 0
            else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilevitv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        # Logits come from the globally pooled feature vector.
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels / label dtype and cache it
            # on the config so subsequent calls reuse the same loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            # outputs[2:] skips (last_hidden_state, pooled_output), keeping hidden states.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
817
+
818
+
819
class MobileViTV2ASPPPooling(nn.Module):
    """Image-level ASPP branch: global average pool -> 1x1 conv -> upsample back."""

    def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None:
        super().__init__()

        # Collapse the spatial dimensions to a single 1x1 location.
        self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)

        self.conv_1x1 = MobileViTV2ConvLayer(
            config,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            use_normalization=True,
            use_activation="relu",
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        # Remember the input resolution so the pooled map can be stretched back to it.
        height_width = features.shape[-2:]
        pooled = self.conv_1x1(self.global_pool(features))
        return nn.functional.interpolate(pooled, size=height_width, mode="bilinear", align_corners=False)
842
+
843
+
844
class MobileViTV2ASPP(nn.Module):
    """
    ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587
    """

    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__()

        encoder_out_channels = make_divisible(512 * config.width_multiplier, divisor=8)  # layer 5 output dimension
        in_channels = encoder_out_channels
        out_channels = config.aspp_out_channels

        if len(config.atrous_rates) != 3:
            raise ValueError("Expected 3 values for atrous_rates")

        self.convs = nn.ModuleList()

        # Branch 1: plain 1x1 projection.
        self.convs.append(
            MobileViTV2ConvLayer(
                config,
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1,
                use_activation="relu",
            )
        )

        # Branches 2-4: dilated 3x3 convolutions, one per atrous rate.
        for rate in config.atrous_rates:
            self.convs.append(
                MobileViTV2ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=3,
                    dilation=rate,
                    use_activation="relu",
                )
            )

        # Branch 5: image-level pooling.
        self.convs.append(MobileViTV2ASPPPooling(config, in_channels, out_channels))

        # Fuse the five concatenated branch outputs back to `out_channels`.
        self.project = MobileViTV2ConvLayer(
            config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
        )

        self.dropout = nn.Dropout(p=config.aspp_dropout_prob)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        branch_outputs = [conv(features) for conv in self.convs]
        fused = self.project(torch.cat(branch_outputs, dim=1))
        return self.dropout(fused)
902
+
903
+
904
class MobileViTV2DeepLabV3(nn.Module):
    """
    DeepLabv3 architecture: https://arxiv.org/abs/1706.05587
    """

    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__()
        self.aspp = MobileViTV2ASPP(config)

        self.dropout = nn.Dropout2d(config.classifier_dropout_prob)

        # Final 1x1 conv producing per-pixel class logits (no norm, no activation).
        self.classifier = MobileViTV2ConvLayer(
            config,
            in_channels=config.aspp_out_channels,
            out_channels=config.num_labels,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
            bias=True,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Only the deepest encoder feature map feeds the segmentation head.
        deepest_features = hidden_states[-1]
        return self.classifier(self.dropout(self.aspp(deepest_features)))
931
+
932
+
933
@add_start_docstrings(
    """
    MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.
    """,
    MOBILEVITV2_START_DOCSTRING,
)
class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel):
    def __init__(self, config: MobileViTV2Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        # expand_output=False: the head needs spatial feature maps, not pooled features.
        self.mobilevitv2 = MobileViTV2Model(config, expand_output=False)
        self.segmentation_head = MobileViTV2DeepLabV3(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from transformers import AutoImageProcessor, MobileViTV2ForSemanticSegmentation

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
        >>> model = MobileViTV2ForSemanticSegmentation.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

        >>> inputs = image_processor(images=image, return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> # logits are of shape (batch_size, num_labels, height, width)
        >>> logits = outputs.logits
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None and self.config.num_labels == 1:
            raise ValueError("The number of labels should be greater than one")

        outputs = self.mobilevitv2(
            pixel_values,
            output_hidden_states=True,  # we need the intermediate hidden states
            return_dict=return_dict,
        )

        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]

        logits = self.segmentation_head(encoder_hidden_states)

        loss = None
        if labels is not None:
            # upsample logits to the images' original size
            upsampled_logits = nn.functional.interpolate(
                logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
            )
            loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
            loss = loss_fct(upsampled_logits, labels)

        if not return_dict:
            # Hidden states were forced on for the head; only forward them to the
            # caller when they were actually requested.
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
1028
+
1029
+
1030
# Public symbols re-exported by the mobilevitv2 model package.
__all__ = [
    "MobileViTV2ForImageClassification",
    "MobileViTV2ForSemanticSegmentation",
    "MobileViTV2Model",
    "MobileViTV2PreTrainedModel",
]
janus/lib/python3.10/site-packages/transformers/models/mt5/__init__.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Static type checkers see eager imports of every submodule.
    from .configuration_mt5 import *
    from .modeling_flax_mt5 import *
    from .modeling_mt5 import *
    from .modeling_tf_mt5 import *
    from .tokenization_mt5 import *
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules
    # only on first attribute access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (612 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/configuration_mt5.cpython-310.pyc ADDED
Binary file (6.58 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_flax_mt5.cpython-310.pyc ADDED
Binary file (3.91 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_mt5.cpython-310.pyc ADDED
Binary file (69.3 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/modeling_tf_mt5.cpython-310.pyc ADDED
Binary file (3.13 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5.cpython-310.pyc ADDED
Binary file (438 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/mt5/__pycache__/tokenization_mt5_fast.cpython-310.pyc ADDED
Binary file (451 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/mt5/configuration_mt5.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020, The T5 Authors and HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """mT5 model configuration"""
16
+
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxSeq2SeqConfigWithPast
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
class MT5Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MT5Model`] or a [`TFMT5Model`]. It is used to
    instantiate a mT5 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the mT5
    [google/mt5-small](https://huggingface.co/google/mt5-small) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 250112):
            Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`].
        d_model (`int`, *optional*, defaults to 512):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. In the conventional context, it is typically expected that `d_kv` has to be equal to `d_model // num_heads`.
            But in the architecture of mt5-small, `d_kv` is not equal to `d_model //num_heads`. The `inner_dim` of the projection layer will be defined as `num_heads * d_kv`.
        d_ff (`int`, *optional*, defaults to 1024):
            Size of the intermediate feed forward layer in each `T5Block`.
        num_layers (`int`, *optional*, defaults to 8):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    """

    model_type = "mt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Maps the generic config attribute names to mT5's T5-style names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
        "head_dim": "d_kv",
    }

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=False,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        classifier_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Parse "gated-{ACT_FN}" / "{ACT_FN}" into (is_gated, activation name).
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        # Valid formats are exactly "{ACT_FN}" or "gated-{ACT_FN}".
        if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
149
+
150
+
151
class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """ONNX input names mapped to their dynamic axes; shapes depend on `use_past`."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With cached key/values only the newest decoder token is fed each step,
            # and past key/value tensors become additional inputs.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required to export this architecture."""
        return 13

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model's outputs."""
        return 5e-4


__all__ = ["MT5Config", "MT5OnnxConfig"]
janus/lib/python3.10/site-packages/transformers/models/mt5/modeling_flax_mt5.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Flax mT5 model."""
16
+
17
+ import jax.numpy as jnp
18
+
19
+ from ...utils import logging
20
+ from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
21
+ from .configuration_mt5 import MT5Config
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ _CONFIG_FOR_DOC = "T5Config"
27
+
28
+
29
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.
    """
    # Rotate every token one position to the right, then overwrite position 0
    # with the decoder start token (the wrapped-around last token is discarded).
    rolled = jnp.roll(input_ids, shift=1, axis=-1)
    shifted_input_ids = rolled.at[:, 0].set(decoder_start_token_id)
    # Replace label-ignore sentinels (-100) with the real pad token id so they
    # never reach the embedding layer.
    return jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
40
+
41
+
42
class FlaxMT5Model(FlaxT5Model):
    r"""
    This class overrides [`FlaxT5Model`]. Please check the superclass for the appropriate documentation alongside usage
    examples.

    Examples:

    ```python
    >>> from transformers import FlaxMT5Model, AutoTokenizer

    >>> model = FlaxMT5Model.from_pretrained("google/mt5-small")
    >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

    >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
    >>> summary = "Weiter Verhandlung in Syrien."
    >>> inputs = tokenizer(article, return_tensors="np")

    >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids

    >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=decoder_input_ids)
    >>> hidden_states = outputs.last_hidden_state
    ```"""

    # mT5 reuses the Flax T5 computation graph unchanged; only the model-type
    # string and the configuration class differ from the parent.
    model_type = "mt5"
    config_class = MT5Config
67
+
68
+
69
class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    r"""
    This class overrides [`FlaxT5EncoderModel`]. Please check the superclass for the appropriate documentation
    alongside usage examples.

    Examples:

    ```python
    >>> from transformers import FlaxMT5EncoderModel, AutoTokenizer

    >>> model = FlaxMT5EncoderModel.from_pretrained("google/mt5-small")
    >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

    >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
    >>> inputs = tokenizer(article, return_tensors="np")

    >>> outputs = model(input_ids=inputs["input_ids"])
    >>> hidden_states = outputs.last_hidden_state
    ```"""

    # mT5 reuses the Flax T5 encoder unchanged; only the model-type string and
    # the configuration class differ from the parent.
    model_type = "mt5"
    config_class = MT5Config
94
+
95
+
96
class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    r"""
    This class overrides [`FlaxT5ForConditionalGeneration`]. Please check the superclass for the appropriate
    documentation alongside usage examples.

    Examples:

    ```python
    >>> from transformers import FlaxMT5ForConditionalGeneration, AutoTokenizer

    >>> model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
    >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

    >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
    >>> summary = "Weiter Verhandlung in Syrien."
    >>> inputs = tokenizer(article, return_tensors="np")

    >>> decoder_input_ids = tokenizer(text_target=summary, return_tensors="np").input_ids

    >>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids)
    >>> logits = outputs.logits
    ```"""

    # mT5 reuses the Flax T5 seq2seq LM head unchanged; only the model-type
    # string and the configuration class differ from the parent.
    model_type = "mt5"
    config_class = MT5Config


__all__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]