ZTWHHH committed on
Commit
b0e1aca
·
verified ·
1 Parent(s): 9148022

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. mgm/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py +42 -0
  2. mgm/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc +0 -0
  3. mgm/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc +0 -0
  4. mgm/lib/python3.10/site-packages/transformers/models/bartpho/tokenization_bartpho.py +327 -0
  5. mgm/lib/python3.10/site-packages/transformers/models/deberta/__init__.py +120 -0
  6. mgm/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc +0 -0
  7. mgm/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc +0 -0
  8. mgm/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc +0 -0
  9. mgm/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py +199 -0
  10. mgm/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py +1433 -0
  11. mgm/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py +1432 -0
  12. mgm/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py +286 -0
  13. mgm/lib/python3.10/site-packages/transformers/models/deta/__init__.py +73 -0
  14. mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/__init__.cpython-310.pyc +0 -0
  15. mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/configuration_deta.cpython-310.pyc +0 -0
  16. mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_resnet_to_pytorch.cpython-310.pyc +0 -0
  17. mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_swin_to_pytorch.cpython-310.pyc +0 -0
  18. mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/image_processing_deta.cpython-310.pyc +0 -0
  19. mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/modeling_deta.cpython-310.pyc +0 -0
  20. mgm/lib/python3.10/site-packages/transformers/models/deta/configuration_deta.py +232 -0
  21. mgm/lib/python3.10/site-packages/transformers/models/deta/convert_deta_resnet_to_pytorch.py +320 -0
  22. mgm/lib/python3.10/site-packages/transformers/models/deta/convert_deta_swin_to_pytorch.py +327 -0
  23. mgm/lib/python3.10/site-packages/transformers/models/deta/image_processing_deta.py +1095 -0
  24. mgm/lib/python3.10/site-packages/transformers/models/deta/modeling_deta.py +0 -0
  25. mgm/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/modeling_imagegpt.cpython-310.pyc +0 -0
  26. mgm/lib/python3.10/site-packages/transformers/models/imagegpt/feature_extraction_imagegpt.py +33 -0
  27. mgm/lib/python3.10/site-packages/transformers/models/imagegpt/image_processing_imagegpt.py +293 -0
  28. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__init__.py +144 -0
  29. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/__init__.cpython-310.pyc +0 -0
  30. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/configuration_layoutlmv3.cpython-310.pyc +0 -0
  31. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/feature_extraction_layoutlmv3.cpython-310.pyc +0 -0
  32. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/image_processing_layoutlmv3.cpython-310.pyc +0 -0
  33. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_layoutlmv3.cpython-310.pyc +0 -0
  34. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_tf_layoutlmv3.cpython-310.pyc +0 -0
  35. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/processing_layoutlmv3.cpython-310.pyc +0 -0
  36. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3.cpython-310.pyc +0 -0
  37. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3_fast.cpython-310.pyc +0 -0
  38. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/configuration_layoutlmv3.py +294 -0
  39. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/image_processing_layoutlmv3.py +366 -0
  40. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/modeling_layoutlmv3.py +1373 -0
  41. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +1569 -0
  42. mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/tokenization_layoutlmv3.py +1479 -0
  43. mgm/lib/python3.10/site-packages/transformers/models/pvt/__init__.py +80 -0
  44. mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/__init__.cpython-310.pyc +0 -0
  45. mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc +0 -0
  46. mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/convert_pvt_to_pytorch.cpython-310.pyc +0 -0
  47. mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc +0 -0
  48. mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc +0 -0
  49. mgm/lib/python3.10/site-packages/transformers/models/pvt/configuration_pvt.py +164 -0
  50. mgm/lib/python3.10/site-packages/transformers/models/pvt/convert_pvt_to_pytorch.py +227 -0
mgm/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Maps submodule name -> list of public names it exports; consumed by _LazyModule below.
_import_structure = {}

try:
    # BartphoTokenizer is SentencePiece-based, so it is only exposed when the
    # `sentencepiece` package is installed.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    # Mirror the runtime gating above so static type checkers / IDEs resolve the
    # real symbol without triggering the lazy-import machinery.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    # Replace this module object with a lazy proxy: submodules listed in
    # _import_structure are imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
mgm/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (677 Bytes). View file
 
mgm/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/bartpho/tokenization_bartpho.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# coding=utf-8
# Copyright 2021 VinAI Research and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for BARTpho-syllable model."""


import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# Marker character (U+2581) SentencePiece uses for the start of a word; stripped
# back to a plain space when detokenizing.
SPIECE_UNDERLINE = "▁"

# On-disk filenames for the two vocabulary artifacts this tokenizer saves/loads:
# the SentencePiece model and the fairseq-style monolingual dictionary.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

# Download locations of the pretrained vocabulary files on the Hub.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

# Maximum input length (in tokens) supported by the pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
46
class BartphoTokenizer(PreTrainedTokenizer):
    """
    Adapted from [`XLMRobertaTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file. This vocabulary is the pre-trained SentencePiece model available from the
            multilingual XLM-RoBERTa, also used in mBART, consisting of 250K types.
        monolingual_vocab_file (`str`):
            Path to the monolingual vocabulary file. This monolingual vocabulary consists of Vietnamese-specialized
            types extracted from the multilingual vocabulary vocab_file of 250K types.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab

        # Keep order of special tokens for backward compatibility
        # The id layout mirrors a fairseq dictionary: the special tokens first
        # (deduplicated, in this fixed order), then one id per line of the
        # monolingual dict file, with <mask> appended last if not already present.
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        # Each dict line is "<token> <count>"; only the token (first field) is used.
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        # Reverse mapping for id -> token lookups.
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        # NOTE: the vocab must be fully built before calling the base __init__,
        # which may resolve special tokens against it.
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

    def __getstate__(self):
        # The SentencePieceProcessor is a SWIG object and cannot be pickled;
        # stash its serialized proto instead and rebuild it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        # Rebuild the SentencePiece processor from the proto saved by __getstate__.
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An BARTPho sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """

        if token_ids_0 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # Mask matches the layout produced by build_inputs_with_special_tokens.
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.

        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        # Size of the reduced (fairseq) vocab, not of the full 250K SentencePiece model.
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        # Sub-word segmentation is delegated entirely to SentencePiece.
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        # Pieces produced by sp_model that are not in the reduced vocab map to <unk>.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # Writes both vocabulary artifacts into save_directory and returns their paths.
        # NOTE(review): on an invalid directory this logs and returns None (not a
        # tuple), matching the signature loosely — callers should not unpack blindly.
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        # Copy the original SentencePiece model when it exists on disk; otherwise
        # re-serialize it from the in-memory processor.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        # Same strategy for the monolingual dict: copy if present, else rebuild it
        # from the id mapping (special tokens excluded, fairseq line format).
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
mgm/lib/python3.10/site-packages/transformers/models/deberta/__init__.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> list of public names it exports; consumed by _LazyModule below.
# Configuration and the slow tokenizer have no optional dependencies, so they are
# always exposed.
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

# Fast tokenizer: only available when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

# PyTorch model classes: gated on torch being installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

# TensorFlow model classes: gated on tensorflow being installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Mirror the runtime gating above so static type checkers / IDEs resolve the
    # real symbols without triggering the lazy-import machinery.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )


else:
    import sys

    # Replace this module object with a lazy proxy: submodules listed in
    # _import_structure are imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
mgm/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc ADDED
Binary file (8.52 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc ADDED
Binary file (45.2 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# coding=utf-8
# Copyright 2020, Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DeBERTa model configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    # Imported only for annotations, to avoid import cycles at runtime.
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

# Hub locations of the config.json for each pretrained DeBERTa checkpoint.
DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/config.json",
    "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/config.json",
    "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/config.json",
    "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/config.json",
    "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/config.json",
    "microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/config.json",
}
+ class DebertaConfig(PretrainedConfig):
41
+ r"""
42
+ This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is
43
+ used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
44
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
45
+ [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.
46
+
47
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
48
+ documentation from [`PretrainedConfig`] for more information.
49
+
50
+ Arguments:
51
+ vocab_size (`int`, *optional*, defaults to 30522):
52
+ Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
53
+ `inputs_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
54
+ hidden_size (`int`, *optional*, defaults to 768):
55
+ Dimensionality of the encoder layers and the pooler layer.
56
+ num_hidden_layers (`int`, *optional*, defaults to 12):
57
+ Number of hidden layers in the Transformer encoder.
58
+ num_attention_heads (`int`, *optional*, defaults to 12):
59
+ Number of attention heads for each attention layer in the Transformer encoder.
60
+ intermediate_size (`int`, *optional*, defaults to 3072):
61
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
62
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
63
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
64
+ `"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
65
+ are supported.
66
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
67
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
68
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
69
+ The dropout ratio for the attention probabilities.
70
+ max_position_embeddings (`int`, *optional*, defaults to 512):
71
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
72
+ just in case (e.g., 512 or 1024 or 2048).
73
+ type_vocab_size (`int`, *optional*, defaults to 2):
74
+ The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
75
+ initializer_range (`float`, *optional*, defaults to 0.02):
76
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
77
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
78
+ The epsilon used by the layer normalization layers.
79
+ relative_attention (`bool`, *optional*, defaults to `False`):
80
+ Whether use relative position encoding.
81
+ max_relative_positions (`int`, *optional*, defaults to 1):
82
+ The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
83
+ as `max_position_embeddings`.
84
+ pad_token_id (`int`, *optional*, defaults to 0):
85
+ The value used to pad input_ids.
86
+ position_biased_input (`bool`, *optional*, defaults to `True`):
87
+ Whether add absolute position embedding to content embedding.
88
+ pos_att_type (`List[str]`, *optional*):
89
+ The type of relative position attention, it can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]`,
90
+ `["p2c", "c2p"]`.
91
 +            layer_norm_eps (`float`, optional, defaults to 1e-7):
92
+ The epsilon used by the layer normalization layers.
93
+
94
+ Example:
95
+
96
+ ```python
97
+ >>> from transformers import DebertaConfig, DebertaModel
98
+
99
+ >>> # Initializing a DeBERTa microsoft/deberta-base style configuration
100
+ >>> configuration = DebertaConfig()
101
+
102
+ >>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
103
+ >>> model = DebertaModel(configuration)
104
+
105
+ >>> # Accessing the model configuration
106
+ >>> configuration = model.config
107
+ ```"""
108
+
109
+ model_type = "deberta"
110
+
111
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        """Build a DeBERTa configuration; unrecognized kwargs are forwarded to the base config class."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        # -1 (or any value < 1) means "fall back to max_position_embeddings" downstream.
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        # Older configs stored pos_att_type as a "|"-separated string, e.g. "p2c|c2p".
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # pooler_hidden_size defaults to hidden_size unless explicitly overridden via kwargs.
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
162
+
163
+
164
+ # Copied from transformers.models.deberta_v2.configuration_deberta_v2.DebertaV2OnnxConfig
165
class DebertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for DeBERTa models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # token_type_ids is only an input when the model actually uses token types.
        input_names = ["input_ids", "attention_mask"]
        if self._config.type_vocab_size > 0:
            input_names.append("token_type_ids")
        return OrderedDict((name, dynamic_axis) for name in input_names)

    @property
    def default_onnx_opset(self) -> int:
        # Minimum opset required by the operators used during export.
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        """Create dummy inputs for tracing, dropping token_type_ids when unused by the model."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
mgm/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py ADDED
@@ -0,0 +1,1433 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the Hugging Face Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DeBERTa model."""
16
+
17
+ from collections.abc import Sequence
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ MaskedLMOutput,
29
+ QuestionAnsweringModelOutput,
30
+ SequenceClassifierOutput,
31
+ TokenClassifierOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import softmax_backward_data
35
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
36
+ from .configuration_deberta import DebertaConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+ _CONFIG_FOR_DOC = "DebertaConfig"
41
+ _CHECKPOINT_FOR_DOC = "microsoft/deberta-base"
42
+
43
+ # Masked LM docstring
44
+ _CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback"
45
+ _MASKED_LM_EXPECTED_OUTPUT = "' Paris'"
46
+ _MASKED_LM_EXPECTED_LOSS = "0.54"
47
+
48
+ # QuestionAnswering docstring
49
+ _CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad"
50
+ _QA_EXPECTED_OUTPUT = "' a nice puppet'"
51
+ _QA_EXPECTED_LOSS = 0.14
52
+ _QA_TARGET_START_INDEX = 12
53
+ _QA_TARGET_END_INDEX = 14
54
+
55
+
56
+ DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
57
+ "microsoft/deberta-base",
58
+ "microsoft/deberta-large",
59
+ "microsoft/deberta-xlarge",
60
+ "microsoft/deberta-base-mnli",
61
+ "microsoft/deberta-large-mnli",
62
+ "microsoft/deberta-xlarge-mnli",
63
+ ]
64
+
65
+
66
class ContextPooler(nn.Module):
    """Pools a sequence by transforming the first-token hidden state."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
        self.dropout = StableDropout(config.pooler_dropout)
        self.config = config

    def forward(self, hidden_states):
        # The first token's hidden state acts as the sequence summary.
        first_token = hidden_states[:, 0]
        pooled = self.dense(self.dropout(first_token))
        return ACT2FN[self.config.pooler_hidden_act](pooled)

    @property
    def output_dim(self):
        # The pooled representation has the model's hidden size.
        return self.config.hidden_size
86
+
87
+
88
class XSoftmax(torch.autograd.Function):
    """
    Masked Softmax which is optimized for saving memory

    Args:
        input (`torch.tensor`): The input tensor that will apply softmax.
        mask (`torch.IntTensor`):
            The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
        dim (int): The dimension that will apply softmax

    Example:

    ```python
    >>> import torch
    >>> from transformers.models.deberta.modeling_deberta import XSoftmax

    >>> # Make a tensor
    >>> x = torch.randn([4, 20, 100])

    >>> # Create a mask
    >>> mask = (x > 0).int()

    >>> # Specify the dimension to apply softmax
    >>> dim = -1

    >>> y = XSoftmax.apply(x, mask, dim)
    ```"""

    @staticmethod
    def forward(self, input, mask, dim):
        # NOTE: `self` is the autograd context object (conventionally named ctx).
        self.dim = dim
        # rmask flags positions that must NOT take part in the softmax.
        rmask = ~(mask.to(torch.bool))

        # Fill masked entries with the most negative representable value so they get
        # ~0 probability, then explicitly zero them after the softmax.
        output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
        output = torch.softmax(output, self.dim)
        output.masked_fill_(rmask, 0)
        # Only the output is saved — backward recomputes the gradient from it.
        self.save_for_backward(output)
        return output

    @staticmethod
    def backward(self, grad_output):
        (output,) = self.saved_tensors
        inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
        # No gradients flow to `mask` or `dim`.
        return inputGrad, None, None

    @staticmethod
    def symbolic(g, self, mask, dim):
        # ONNX export path: express the masked softmax with ONNX graph ops.
        import torch.onnx.symbolic_helper as sym_help
        from torch.onnx.symbolic_opset9 import masked_fill, softmax

        mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
        # Invert the mask: r_mask is True where the element is ignored (1 - mask).
        r_mask = g.op(
            "Cast",
            g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
            to_i=sym_help.cast_pytorch_to_onnx["Bool"],
        )
        output = masked_fill(
            g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
        )
        output = softmax(g, output, dim)
        # Zero the masked positions after softmax, mirroring the eager path.
        return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
149
+
150
+
151
class DropoutContext(object):
    """Mutable state shared between StableDropout invocations (probability, cached mask, scale)."""

    def __init__(self):
        # Dropout probability, lazily sampled mask, gradient scale, and mask-reuse flag.
        self.dropout = 0
        self.mask = None
        self.scale = 1
        self.reuse_mask = True
157
+
158
+
159
def get_mask(input, local_context):
    """Resolve a dropout mask and probability from a raw float or a DropoutContext.

    Returns a (mask, dropout) pair. When a DropoutContext is supplied, the sampled
    mask is cached on it so later callers can reuse the identical dropout pattern.
    """
    if isinstance(local_context, DropoutContext):
        dropout = local_context.dropout * local_context.scale
        mask = local_context.mask if local_context.reuse_mask else None
    else:
        dropout = local_context
        mask = None

    if dropout > 0 and mask is None:
        # True entries mark positions that will be dropped.
        mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)

    if isinstance(local_context, DropoutContext) and local_context.mask is None:
        local_context.mask = mask

    return mask, dropout
176
+
177
+
178
class XDropout(torch.autograd.Function):
    """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""

    @staticmethod
    def forward(ctx, input, local_ctx):
        # local_ctx is either a plain dropout probability or a DropoutContext.
        mask, dropout = get_mask(input, local_ctx)
        # Inverted-dropout scale keeps the expected activation magnitude unchanged.
        ctx.scale = 1.0 / (1 - dropout)
        if dropout > 0:
            ctx.save_for_backward(mask)
            return input.masked_fill(mask, 0) * ctx.scale
        else:
            return input

    @staticmethod
    def backward(ctx, grad_output):
        # scale > 1 implies dropout was actually applied in forward.
        if ctx.scale > 1:
            (mask,) = ctx.saved_tensors
            return grad_output.masked_fill(mask, 0) * ctx.scale, None
        else:
            return grad_output, None

    @staticmethod
    def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
        from torch.onnx import symbolic_opset12

        dropout_p = local_ctx
        if isinstance(local_ctx, DropoutContext):
            dropout_p = local_ctx.dropout
        # StableDropout only calls this function when training.
        train = True
        # TODO: We should check if the opset_version being used to export
        # is > 12 here, but there's no good way to do that. As-is, if the
        # opset_version < 12, export will fail with a CheckerError.
        # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
        # if opset_version < 12:
        #     return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
        return symbolic_opset12.dropout(g, input, dropout_p, train)
215
+
216
+
217
class StableDropout(nn.Module):
    """
    Optimized dropout module for stabilizing the training

    Args:
        drop_prob (float): the dropout probabilities
    """

    def __init__(self, drop_prob):
        super().__init__()
        self.drop_prob = drop_prob
        self.count = 0
        self.context_stack = None

    def forward(self, x):
        """Apply dropout to `x`; a no-op in eval mode or when the probability is zero."""
        if not self.training or self.drop_prob <= 0:
            return x
        return XDropout.apply(x, self.get_context())

    def clear_context(self):
        # Drop all cached contexts and reset the cursor.
        self.count = 0
        self.context_stack = None

    def init_context(self, reuse_mask=True, scale=1):
        # Enable context bookkeeping and (re)configure every cached context.
        if self.context_stack is None:
            self.context_stack = []
        self.count = 0
        for ctx in self.context_stack:
            ctx.reuse_mask = reuse_mask
            ctx.scale = scale

    def get_context(self):
        if self.context_stack is None:
            # No bookkeeping requested: hand XDropout the raw probability.
            return self.drop_prob
        # Grow the stack on demand and hand out contexts in call order.
        if self.count >= len(self.context_stack):
            self.context_stack.append(DropoutContext())
        ctx = self.context_stack[self.count]
        ctx.dropout = self.drop_prob
        self.count += 1
        return ctx
264
+
265
+
266
class DebertaLayerNorm(nn.Module):
    """LayerNorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, size, eps=1e-12):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(size))
        self.bias = nn.Parameter(torch.zeros(size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Normalize in float32 for numerical stability, then cast back to the input dtype.
        original_dtype = hidden_states.dtype
        x = hidden_states.float()
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = (centered / torch.sqrt(variance + self.variance_epsilon)).to(original_dtype)
        return self.weight * normalized + self.bias
284
+
285
+
286
class DebertaSelfOutput(nn.Module):
    """Projects the attention output and applies dropout, residual connection, and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
298
+
299
+
300
class DebertaAttention(nn.Module):
    """Disentangled self-attention followed by the output projection block."""

    def __init__(self, config):
        super().__init__()
        self.self = DisentangledSelfAttention(config)
        self.output = DebertaSelfOutput(config)
        self.config = config

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
    ):
        self_output = self.self(
            hidden_states,
            attention_mask,
            output_attentions,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        att_matrix = None
        if output_attentions:
            self_output, att_matrix = self_output
        # When no explicit query is given, the residual target is the layer input itself.
        if query_states is None:
            query_states = hidden_states
        attention_output = self.output(self_output, query_states)

        return (attention_output, att_matrix) if output_attentions else attention_output
334
+
335
+
336
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta
337
class DebertaIntermediate(nn.Module):
    """Feed-forward expansion from hidden_size to intermediate_size with a non-linearity."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a string key into ACT2FN or a callable activation.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
350
+
351
+
352
class DebertaOutput(nn.Module):
    """Feed-forward contraction back to hidden_size with dropout, residual, and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
365
+
366
+
367
class DebertaLayer(nn.Module):
    """One transformer block: attention, feed-forward expansion, and output projection."""

    def __init__(self, config):
        super().__init__()
        self.attention = DebertaAttention(config)
        self.intermediate = DebertaIntermediate(config)
        self.output = DebertaOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
        output_attentions=False,
    ):
        attention_output = self.attention(
            hidden_states,
            attention_mask,
            output_attentions=output_attentions,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        att_matrix = None
        if output_attentions:
            attention_output, att_matrix = attention_output
        layer_output = self.output(self.intermediate(attention_output), attention_output)
        return (layer_output, att_matrix) if output_attentions else layer_output
399
+
400
+
401
class DebertaEncoder(nn.Module):
    """Modified BertEncoder with relative position bias support"""

    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
        self.relative_attention = getattr(config, "relative_attention", False)
        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                # Values < 1 mean "use the absolute position budget instead".
                self.max_relative_positions = config.max_position_embeddings
            # One embedding row per relative distance in [-max_relative_positions, max_relative_positions).
            self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
        self.gradient_checkpointing = False

    def get_rel_embedding(self):
        # Relative-position embedding table, or None when relative attention is disabled.
        rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        # Expand a per-token (<=2D) or per-pair (3D) mask into the 4D [batch, 1, query, key] form.
        if attention_mask.dim() <= 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            # Outer product: pair (i, j) is attendable only when both i and j are valid tokens.
            attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
        elif attention_mask.dim() == 3:
            attention_mask = attention_mask.unsqueeze(1)

        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        # Lazily build the relative-position matrix when relative attention needs it.
        if self.relative_attention and relative_pos is None:
            q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
            relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)
        return relative_pos

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_hidden_states=True,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        return_dict=True,
    ):
        """Run every layer in order; returns a BaseModelOutput (or a tuple when return_dict=False)."""
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)

        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # hidden_states may be a sequence of per-layer key/value states; pick the first layer's.
        if isinstance(hidden_states, Sequence):
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # Recompute activations in backward to save memory.
                hidden_states = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    next_kv,
                    attention_mask,
                    query_states,
                    relative_pos,
                    rel_embeddings,
                    output_attentions,
                )
            else:
                hidden_states = layer_module(
                    next_kv,
                    attention_mask,
                    query_states=query_states,
                    relative_pos=relative_pos,
                    rel_embeddings=rel_embeddings,
                    output_attentions=output_attentions,
                )

            if output_attentions:
                hidden_states, att_m = hidden_states

            # Keep the query stream in sync when an explicit query was provided.
            if query_states is not None:
                query_states = hidden_states
            if isinstance(hidden_states, Sequence):
                # Sequence inputs carry the next layer's key/value states explicitly.
                next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = hidden_states

            if output_attentions:
                all_attentions = all_attentions + (att_m,)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
500
+
501
+
502
def build_relative_position(query_size, key_size, device):
    """
    Build relative position according to the query and key

    We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
    \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -
    P_k\\)

    Args:
        query_size (int): the length of query
        key_size (int): the length of key

    Return:
        `torch.LongTensor`: A tensor with shape [1, query_size, key_size]

    """
    query_ids = torch.arange(query_size, dtype=torch.long, device=device)
    key_ids = torch.arange(key_size, dtype=torch.long, device=device)
    # rel[i, j] = i - j: signed distance from query position i to key position j.
    rel_pos_ids = query_ids.unsqueeze(-1) - key_ids.unsqueeze(0).expand(query_size, -1)
    rel_pos_ids = rel_pos_ids[:query_size, :]
    # Add a leading batch dimension.
    return rel_pos_ids.unsqueeze(0)
525
+
526
+
527
@torch.jit.script
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    # Broadcast the content->position index tensor to the attention-score shape
    # (batch, heads, query_len, key_len) so it can be fed to torch.gather.
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
530
+
531
+
532
@torch.jit.script
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    # Broadcast the position->content index tensor to (batch, heads, key_len, key_len)
    # for the torch.gather in the p2c attention bias computation.
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
535
+
536
+
537
@torch.jit.script
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    # Broadcast the position index over the batch/head dims of the p2c attention scores.
    return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
540
+
541
+
542
+ class DisentangledSelfAttention(nn.Module):
543
+ """
544
+ Disentangled self-attention module
545
+
546
+ Parameters:
547
+ config (`str`):
548
+ A model config class instance with the configuration to build a new model. The schema is similar to
549
+ *BertConfig*, for more details, please refer [`DebertaConfig`]
550
+
551
+ """
552
+
553
+ def __init__(self, config):
554
+ super().__init__()
555
+ if config.hidden_size % config.num_attention_heads != 0:
556
+ raise ValueError(
557
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
558
+ f"heads ({config.num_attention_heads})"
559
+ )
560
+ self.num_attention_heads = config.num_attention_heads
561
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
562
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
563
+ self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
564
+ self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
565
+ self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
566
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
567
+
568
+ self.relative_attention = getattr(config, "relative_attention", False)
569
+ self.talking_head = getattr(config, "talking_head", False)
570
+
571
+ if self.talking_head:
572
+ self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
573
+ self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
574
+
575
+ if self.relative_attention:
576
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
577
+ if self.max_relative_positions < 1:
578
+ self.max_relative_positions = config.max_position_embeddings
579
+ self.pos_dropout = StableDropout(config.hidden_dropout_prob)
580
+
581
+ if "c2p" in self.pos_att_type:
582
+ self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
583
+ if "p2c" in self.pos_att_type:
584
+ self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)
585
+
586
+ self.dropout = StableDropout(config.attention_probs_dropout_prob)
587
+
588
+ def transpose_for_scores(self, x):
589
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
590
+ x = x.view(new_x_shape)
591
+ return x.permute(0, 2, 1, 3)
592
+
593
+ def forward(
594
+ self,
595
+ hidden_states,
596
+ attention_mask,
597
+ output_attentions=False,
598
+ query_states=None,
599
+ relative_pos=None,
600
+ rel_embeddings=None,
601
+ ):
602
+ """
603
+ Call the module
604
+
605
+ Args:
606
+ hidden_states (`torch.FloatTensor`):
607
+ Input states to the module usually the output from previous layer, it will be the Q,K and V in
608
+ *Attention(Q,K,V)*
609
+
610
+ attention_mask (`torch.BoolTensor`):
611
+ An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
612
+ sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
613
+ th token.
614
+
615
+ output_attentions (`bool`, optional):
616
+ Whether return the attention matrix.
617
+
618
+ query_states (`torch.FloatTensor`, optional):
619
+ The *Q* state in *Attention(Q,K,V)*.
620
+
621
+ relative_pos (`torch.LongTensor`):
622
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
623
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
624
+
625
+ rel_embeddings (`torch.FloatTensor`):
626
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
627
+ \\text{max_relative_positions}\\), *hidden_size*].
628
+
629
+
630
+ """
631
+ if query_states is None:
632
+ qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)
633
+ query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)
634
+ else:
635
+
636
+ def linear(w, b, x):
637
+ if b is not None:
638
+ return torch.matmul(x, w.t()) + b.t()
639
+ else:
640
+ return torch.matmul(x, w.t()) # + b.t()
641
+
642
+ ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)
643
+ qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]
644
+ qkvb = [None] * 3
645
+
646
+ q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype))
647
+ k, v = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)]
648
+ query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]
649
+
650
+ query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
651
+ value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
652
+
653
+ rel_att = None
654
+ # Take the dot product between "query" and "key" to get the raw attention scores.
655
+ scale_factor = 1 + len(self.pos_att_type)
656
+ scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
657
+ query_layer = query_layer / scale.to(dtype=query_layer.dtype)
658
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
659
+ if self.relative_attention:
660
+ rel_embeddings = self.pos_dropout(rel_embeddings)
661
+ rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
662
+
663
+ if rel_att is not None:
664
+ attention_scores = attention_scores + rel_att
665
+
666
+ # bxhxlxd
667
+ if self.talking_head:
668
+ attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
669
+
670
+ attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
671
+ attention_probs = self.dropout(attention_probs)
672
+ if self.talking_head:
673
+ attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
674
+
675
+ context_layer = torch.matmul(attention_probs, value_layer)
676
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
677
+ new_context_layer_shape = context_layer.size()[:-2] + (-1,)
678
+ context_layer = context_layer.view(new_context_layer_shape)
679
+ if output_attentions:
680
+ return (context_layer, attention_probs)
681
+ else:
682
+ return context_layer
683
+
684
def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
    """Compute DeBERTa's disentangled relative-position attention bias.

    Adds the content->position ("c2p") and position->content ("p2c") score
    components selected by `self.pos_att_type`.

    Args:
        query_layer: query tensor after `transpose_for_scores`.
        key_layer: key tensor after `transpose_for_scores`.
        relative_pos: relative position ids of dim 2, 3 or 4, or `None` to build them.
        rel_embeddings: relative-position embedding table.
        scale_factor: number of score components; used to scale the p2c queries.

    Returns:
        Bias tensor to add to the raw attention scores (0 if no component enabled).
    """
    if relative_pos is None:
        q = query_layer.size(-2)
        relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)
    # Normalize relative_pos to 4 dims: (batch, heads, query_len, key_len).
    if relative_pos.dim() == 2:
        relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
    elif relative_pos.dim() == 3:
        relative_pos = relative_pos.unsqueeze(1)
    # bxhxqxk
    elif relative_pos.dim() != 4:
        raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")

    # Clip the usable span to the configured maximum relative distance.
    att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)
    relative_pos = relative_pos.long().to(query_layer.device)
    # Take the 2*att_span rows of the table centered on relative distance 0.
    rel_embeddings = rel_embeddings[
        self.max_relative_positions - att_span : self.max_relative_positions + att_span, :
    ].unsqueeze(0)

    score = 0

    # content->position
    if "c2p" in self.pos_att_type:
        pos_key_layer = self.pos_proj(rel_embeddings)
        pos_key_layer = self.transpose_for_scores(pos_key_layer)
        c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))
        # Shift relative distances into [0, 2*att_span) so they index the table.
        c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
        c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))
        score += c2p_att

    # position->content
    if "p2c" in self.pos_att_type:
        pos_query_layer = self.pos_q_proj(rel_embeddings)
        pos_query_layer = self.transpose_for_scores(pos_query_layer)
        # NOTE: in-place scaling of the projected position queries.
        pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
        if query_layer.size(-2) != key_layer.size(-2):
            # Query/key lengths differ: rebuild key-to-key relative positions.
            r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)
        else:
            r_pos = relative_pos
        # p2c indexes with the negated distance.
        p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
        p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype))
        p2c_att = torch.gather(
            p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)
        ).transpose(-1, -2)

        if query_layer.size(-2) != key_layer.size(-2):
            # Re-align rows to the query positions when lengths differ.
            pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
            p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))
        score += p2c_att

    return score
+
736
class DebertaEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        pad_token_id = getattr(config, "pad_token_id", 0)
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)

        # When position_biased_input is False, absolute positions are not added
        # to the input embeddings (relative positions are handled in attention).
        self.position_biased_input = getattr(config, "position_biased_input", True)
        if not self.position_biased_input:
            self.position_embeddings = None
        else:
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)

        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)

        # Project up to hidden_size when a smaller embedding_size is configured.
        if self.embedding_size != config.hidden_size:
            self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
        """Embed inputs, layer-normalize, zero masked positions, and apply dropout."""
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.position_embeddings is not None:
            position_embeddings = self.position_embeddings(position_ids.long())
        else:
            position_embeddings = torch.zeros_like(inputs_embeds)

        embeddings = inputs_embeds
        if self.position_biased_input:
            embeddings += position_embeddings
        if self.config.type_vocab_size > 0:
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings

        if self.embedding_size != self.config.hidden_size:
            embeddings = self.embed_proj(embeddings)

        embeddings = self.LayerNorm(embeddings)

        # Zero out embeddings at padding positions using the attention mask.
        if mask is not None:
            if mask.dim() != embeddings.dim():
                if mask.dim() == 4:
                    # Collapse an extended mask (b, 1, 1, s) back to (b, s).
                    mask = mask.squeeze(1).squeeze(1)
                mask = mask.unsqueeze(2)
            mask = mask.to(embeddings.dtype)

            embeddings = embeddings * mask

        embeddings = self.dropout(embeddings)
        return embeddings
+
811
+
812
class DebertaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DebertaConfig
    base_model_prefix = "deberta"
    # Old checkpoints may carry absolute position embeddings we don't load.
    _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding token embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
+
836
+
837
# Shared class-level docstring injected via `add_start_docstrings`.
# Fixes grammar typos present upstream: "build" -> "built", "out perform" -> "outperforms".
DEBERTA_START_DOCSTRING = r"""
    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
    improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB pretraining data.

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.


    Parameters:
        config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
+
854
# Shared forward() docstring template; `{0}` is filled with the input shape
# (e.g. "batch_size, sequence_length") by `add_start_docstrings_to_model_forward`.
DEBERTA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
+
897
+
898
@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
class DebertaModel(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = DebertaEmbeddings(config)
        self.encoder = DebertaEncoder(config)
        # Extra decoding steps that re-run the last encoder layer; 0 disables them.
        self.z_steps = 0
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError("The prune function is not implemented in DeBERTa model.")

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            mask=attention_mask,
            inputs_embeds=inputs_embeds,
        )

        # Always request hidden states internally: the z_steps loop and the
        # output slicing below both need the per-layer outputs.
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        encoded_layers = encoder_outputs[1]

        if self.z_steps > 1:
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            # NOTE(review): iterates layers[1:], so only z_steps - 1 extra passes
            # run, each re-applying the final layer with fixed hidden_states —
            # presumably intentional (EMD-style decoding); confirm upstream.
            for layer in layers[1:]:
                query_states = layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=False,
                    query_states=query_states,
                    relative_pos=rel_pos,
                    rel_embeddings=rel_embeddings,
                )
                encoded_layers.append(query_states)

        sequence_output = encoded_layers[-1]

        if not return_dict:
            # Drop the internally-forced hidden states from the tuple when the
            # caller did not ask for them.
            return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
            attentions=encoder_outputs.attentions,
        )
+
1012
+
1013
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
class DebertaForMaskedLM(DebertaPreTrainedModel):
    # Decoder weights/bias are tied to the input embeddings.
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.deberta = DebertaModel(config)
        self.cls = DebertaOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_MASKED_LM,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        mask="[MASK]",
        expected_output=_MASKED_LM_EXPECTED_OUTPUT,
        expected_loss=_MASKED_LM_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
+
1092
+
1093
class DebertaPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied before the MLM decoder."""

    def __init__(self, config):
        super().__init__()
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.dense = nn.Linear(config.hidden_size, self.embedding_size)
        # Resolve string activation names through ACT2FN; accept callables as-is.
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        """Project `hidden_states` to the embedding size, activate, and normalize."""
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
+
1111
+
1112
class DebertaLMPredictionHead(nn.Module):
    """Masked-LM prediction head: transform then project to vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = DebertaPredictionHeadTransform(config)

        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        """Return vocabulary logits for `hidden_states`."""
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
+
1132
+
1133
# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
class DebertaOnlyMLMHead(nn.Module):
    """Thin MLM head wrapper that delegates to `DebertaLMPredictionHead`."""

    def __init__(self, config):
        super().__init__()
        self.predictions = DebertaLMPredictionHead(config)

    def forward(self, sequence_output):
        """Return vocabulary prediction scores for `sequence_output`."""
        return self.predictions(sequence_output)
+
1143
+
1144
@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaForSequenceClassification(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels

        self.deberta = DebertaModel(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim

        self.classifier = nn.Linear(output_dim, num_labels)
        # Classifier dropout falls back to the generic hidden dropout when
        # `cls_dropout` is not configured.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                # Legacy DeBERTa loss selection (predates `problem_type`).
                if self.num_labels == 1:
                    # regression task
                    loss_fn = nn.MSELoss()
                    logits = logits.view(-1).to(labels.dtype)
                    loss = loss_fn(logits, labels.view(-1))
                elif labels.dim() == 1 or labels.size(-1) == 1:
                    # Classification with hard labels; entries < 0 are ignored.
                    label_index = (labels >= 0).nonzero()
                    labels = labels.long()
                    if label_index.size(0) > 0:
                        labeled_logits = torch.gather(
                            logits, 0, label_index.expand(label_index.size(0), logits.size(1))
                        )
                        labels = torch.gather(labels, 0, label_index.view(-1))
                        loss_fct = CrossEntropyLoss()
                        loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
                    else:
                        # Every label ignored: zero loss on logits' device/dtype.
                        loss = torch.tensor(0).to(logits)
                else:
                    # Soft/one-hot labels: cross-entropy via log-softmax.
                    log_softmax = nn.LogSoftmax(-1)
                    loss = -((log_softmax(logits) * labels).sum(-1)).mean()
            elif self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
+
1261
+
1262
@add_start_docstrings(
    """
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaForTokenClassification(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Per-token cross-entropy; -100 labels are ignored by default.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
+
1334
+
1335
@add_start_docstrings(
    """
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaForQuestionAnswering(DebertaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaModel(config)
        # Projects each token's hidden state to (start_logit, end_logit).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_QA,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_QA_EXPECTED_OUTPUT,
        expected_loss=_QA_EXPECTED_LOSS,
        qa_target_start_index=_QA_TARGET_START_INDEX,
        qa_target_end_index=_QA_TARGET_END_INDEX,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two span losses.
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
mgm/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py ADDED
@@ -0,0 +1,1432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 DeBERTa model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ from typing import Dict, Optional, Sequence, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFMaskedLMOutput,
30
+ TFQuestionAnsweringModelOutput,
31
+ TFSequenceClassifierOutput,
32
+ TFTokenClassifierOutput,
33
+ )
34
+ from ...modeling_tf_utils import (
35
+ TFMaskedLanguageModelingLoss,
36
+ TFModelInputType,
37
+ TFPreTrainedModel,
38
+ TFQuestionAnsweringLoss,
39
+ TFSequenceClassificationLoss,
40
+ TFTokenClassificationLoss,
41
+ get_initializer,
42
+ unpack_inputs,
43
+ )
44
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
45
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
46
+ from .configuration_deberta import DebertaConfig
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
+ _CONFIG_FOR_DOC = "DebertaConfig"
53
+ _CHECKPOINT_FOR_DOC = "kamalkraj/deberta-base"
54
+
55
+ TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
56
+ "kamalkraj/deberta-base",
57
+ # See all DeBERTa models at https://huggingface.co/models?filter=DeBERTa
58
+ ]
59
+
60
+
61
class TFDebertaContextPooler(tf.keras.layers.Layer):
    """Pools the encoder output by projecting the first token's hidden state."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(config.pooler_hidden_size, name="dense")
        self.dropout = TFDebertaStableDropout(config.pooler_dropout, name="dropout")
        self.config = config

    def call(self, hidden_states, training: bool = False):
        # "Pool" by taking only the hidden state of the first token,
        # then dropout -> dense -> activation.
        first_token_state = hidden_states[:, 0]
        first_token_state = self.dropout(first_token_state, training=training)
        projected = self.dense(first_token_state)
        activation = get_tf_activation(self.config.pooler_hidden_act)
        return activation(projected)

    @property
    def output_dim(self) -> int:
        # The pooled representation keeps the model hidden size.
        return self.config.hidden_size
80
+
81
+
82
class TFDebertaXSoftmax(tf.keras.layers.Layer):
    """
    Masked softmax optimized for saving memory.

    Args:
        inputs (`tf.Tensor`): The logits the softmax is applied to.
        mask (`tf.Tensor`): Mask where 0 marks elements excluded from the softmax.
        axis (int): The dimension the softmax is applied over.
    """

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis

    def call(self, inputs: tf.Tensor, mask: tf.Tensor):
        # Positions with mask == 0 must not participate in the softmax.
        ignored = tf.logical_not(tf.cast(mask, tf.bool))
        masked_logits = tf.where(ignored, float("-inf"), inputs)
        probs = stable_softmax(masked_logits, self.axis)
        # Zero the ignored positions again so they contribute nothing downstream.
        return tf.where(ignored, 0.0, probs)
102
+
103
+
104
class TFDebertaStableDropout(tf.keras.layers.Layer):
    """
    Optimized dropout module for stabilizing the training

    Args:
        drop_prob (float): the dropout probabilities
    """

    def __init__(self, drop_prob, **kwargs):
        super().__init__(**kwargs)
        self.drop_prob = drop_prob

    @tf.custom_gradient
    def xdropout(self, inputs):
        """
        Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by
        1 / (1 - drop_prob) so the expected activation magnitude is unchanged.
        """
        # `mask` is True where an element is dropped (Bernoulli keep-sample came out 0).
        mask = tf.cast(
            1
            - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
            tf.bool,
        )
        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)
        if self.drop_prob > 0:
            inputs = tf.where(mask, 0.0, inputs) * scale

        def grad(upstream):
            # Reuse the same mask/scale so forward and backward stay consistent.
            if self.drop_prob > 0:
                return tf.where(mask, 0.0, upstream) * scale
            else:
                return upstream

        return inputs, grad

    def call(self, inputs: tf.Tensor, training: tf.Tensor = False):
        # Dropout only applies at training time; inference is the identity.
        if training:
            return self.xdropout(inputs)
        return inputs
142
+
143
+
144
class TFDebertaLayerNorm(tf.keras.layers.Layer):
    """LayerNorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, size, eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.size = size
        self.eps = eps

    def build(self, input_shape):
        # Learnable affine parameters: scale (gamma / "weight") and shift (beta / "bias").
        self.gamma = self.add_weight(shape=[self.size], initializer=tf.ones_initializer(), name="weight")
        self.beta = self.add_weight(shape=[self.size], initializer=tf.zeros_initializer(), name="bias")
        return super().build(input_shape)

    def call(self, x: tf.Tensor) -> tf.Tensor:
        mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
        centered = x - mean
        variance = tf.reduce_mean(tf.square(centered), axis=[-1], keepdims=True)
        # Epsilon goes inside the square root, matching the PyTorch-style LayerNorm.
        normalized = centered / tf.math.sqrt(variance + self.eps)
        return self.gamma * normalized + self.beta
162
+
163
+
164
class TFDebertaSelfOutput(tf.keras.layers.Layer):
    """Projects the attention output, then applies dropout and a residual LayerNorm."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(config.hidden_size, name="dense")
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")

    def call(self, hidden_states, input_tensor, training: bool = False):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected, training=training)
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
176
+
177
+
178
class TFDebertaAttention(tf.keras.layers.Layer):
    """Disentangled self-attention followed by the attention output block."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.self = TFDebertaDisentangledSelfAttention(config, name="self")
        self.dense_output = TFDebertaSelfOutput(config, name="output")
        self.config = config

    def call(
        self,
        input_tensor: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        self_outputs = self.self(
            hidden_states=input_tensor,
            attention_mask=attention_mask,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
            output_attentions=output_attentions,
            training=training,
        )
        # The residual for the output block is the query stream when one is given,
        # otherwise the layer input itself.
        residual = input_tensor if query_states is None else query_states
        attention_output = self.dense_output(
            hidden_states=self_outputs[0], input_tensor=residual, training=training
        )
        # Forward any attention probabilities the self-attention module produced.
        return (attention_output,) + self_outputs[1:]
213
+
214
+
215
class TFDebertaIntermediate(tf.keras.layers.Layer):
    """Feed-forward expansion layer of the transformer block."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        # The activation can be configured either by name or as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        expanded = self.dense(inputs=hidden_states)
        return self.intermediate_act_fn(expanded)
233
+
234
+
235
class TFDebertaOutput(tf.keras.layers.Layer):
    """Feed-forward contraction layer with dropout and a residual LayerNorm."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        projected = self.dense(inputs=hidden_states)
        projected = self.dropout(projected, training=training)
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
251
+
252
+
253
class TFDebertaLayer(tf.keras.layers.Layer):
    """One transformer block: attention, intermediate feed-forward, and output."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFDebertaAttention(config, name="attention")
        self.intermediate = TFDebertaIntermediate(config, name="intermediate")
        self.bert_output = TFDebertaOutput(config, name="output")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        attention_outputs = self.attention(
            input_tensor=hidden_states,
            attention_mask=attention_mask,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
            output_attentions=output_attentions,
            training=training,
        )
        attn_out = attention_outputs[0]
        # Feed-forward sub-block with a residual back onto the attention output.
        ffn_hidden = self.intermediate(hidden_states=attn_out)
        block_out = self.bert_output(hidden_states=ffn_hidden, input_tensor=attn_out, training=training)
        # Append attention probabilities when they were requested.
        return (block_out,) + attention_outputs[1:]
288
+
289
+
290
class TFDebertaEncoder(tf.keras.layers.Layer):
    """Stack of `TFDebertaLayer` blocks with optional shared relative-position embeddings."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)

        self.layer = [TFDebertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
        self.relative_attention = getattr(config, "relative_attention", False)
        self.config = config
        if self.relative_attention:
            # max_relative_positions < 1 means "fall back to max_position_embeddings".
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings

    def build(self, input_shape):
        if self.relative_attention:
            # One shared relative-position embedding table for all layers,
            # covering distances in [-max_relative_positions, max_relative_positions).
            self.rel_embeddings = self.add_weight(
                name="rel_embeddings.weight",
                shape=[self.max_relative_positions * 2, self.config.hidden_size],
                initializer=get_initializer(self.config.initializer_range),
            )
        return super().build(input_shape)

    def get_rel_embedding(self):
        # Returns the shared relative embedding table, or None when relative attention is off.
        rel_embeddings = self.rel_embeddings if self.relative_attention else None
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        """Expand a 2D/3D mask to the 4D [batch, 1|heads, seq, seq] form the layers expect."""
        if len(shape_list(attention_mask)) <= 2:
            # Outer product of the 1D mask with itself: position (i, j) is valid
            # only when both token i and token j are valid.
            extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
            attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
            attention_mask = tf.cast(attention_mask, tf.uint8)
        elif len(shape_list(attention_mask)) == 3:
            attention_mask = tf.expand_dims(attention_mask, 1)

        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        # Lazily build the relative position ids when relative attention is on
        # and the caller did not supply them.
        if self.relative_attention and relative_pos is None:
            q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
            relative_pos = build_relative_position(q, shape_list(hidden_states)[-2])
        return relative_pos

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        """Run all layers, optionally collecting per-layer hidden states and attentions."""
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)

        # `hidden_states` may be a sequence of per-layer key/value inputs; in that
        # case each layer consumes its own entry via `next_kv`.
        if isinstance(hidden_states, Sequence):
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states

        rel_embeddings = self.get_rel_embedding()

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states=next_kv,
                attention_mask=attention_mask,
                query_states=query_states,
                relative_pos=relative_pos,
                rel_embeddings=rel_embeddings,
                output_attentions=output_attentions,
                training=training,
            )
            hidden_states = layer_outputs[0]

            # When a separate query stream is used, it is updated with this
            # layer's output before the next iteration.
            if query_states is not None:
                query_states = hidden_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = hidden_states

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
390
+
391
+
392
def build_relative_position(query_size, key_size):
    """
    Build the relative-position matrix between query and key tokens.

    With query positions \\(P_q\\) in (0, query_size) and key positions \\(P_k\\) in (0, key_size),
    the relative position from query to key is \\(R_{q \\rightarrow k} = P_q - P_k\\).

    Args:
        query_size (int): the length of query
        key_size (int): the length of key

    Return:
        `tf.Tensor`: A tensor with shape [1, query_size, key_size]

    """
    query_positions = tf.range(query_size, dtype=tf.int32)
    key_positions = tf.range(key_size, dtype=tf.int32)
    # Pairwise difference: rel[i, j] = i - j.
    rel_positions = query_positions[:, None] - tf.tile(tf.reshape(key_positions, [1, -1]), [query_size, 1])
    rel_positions = rel_positions[:query_size, :]
    # Add a leading batch dimension -> [1, query_size, key_size].
    rel_positions = tf.expand_dims(rel_positions, axis=0)
    return tf.cast(rel_positions, tf.int64)
414
+
415
+
416
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    """Broadcast content->position gather indices to [batch, heads, q_len, rel_len]."""
    batch, heads, q_len = shape_list(query_layer)[:3]
    target_shape = [batch, heads, q_len, shape_list(relative_pos)[-1]]
    return tf.broadcast_to(c2p_pos, target_shape)
424
+
425
+
426
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    """Broadcast position->content gather indices to [batch, heads, k_len, k_len]."""
    key_len = shape_list(key_layer)[-2]
    target_shape = shape_list(query_layer)[:2] + [key_len, key_len]
    return tf.broadcast_to(c2p_pos, target_shape)
434
+
435
+
436
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    """Broadcast position indices to match the p2c attention map for the final gather."""
    leading = shape_list(p2c_att)[:2]
    trailing = [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
    return tf.broadcast_to(pos_index, leading + trailing)
439
+
440
+
441
def torch_gather(x, indices, gather_axis):
    """TF re-implementation of `torch.gather`: pick elements of `x` along `gather_axis` using `indices`."""
    # Normalize a negative axis to its positive equivalent.
    if gather_axis < 0:
        gather_axis = tf.rank(x) + gather_axis

    if gather_axis != tf.rank(x) - 1:
        # Roll the gather axis into the last position so a flat batched gather works.
        pre_roll = tf.rank(x) - 1 - gather_axis
        permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0)
        x = tf.transpose(x, perm=permutation)
        indices = tf.transpose(indices, perm=permutation)
    else:
        pre_roll = 0

    # Collapse all leading dimensions, then gather row-by-row (batch_dims=1).
    flat_x = tf.reshape(x, (-1, tf.shape(x)[-1]))
    flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1]))
    gathered = tf.gather(flat_x, flat_indices, batch_dims=1)
    gathered = tf.reshape(gathered, tf.shape(indices))

    if pre_roll != 0:
        # Undo the earlier axis roll to restore the original layout.
        permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0)
        gathered = tf.transpose(gathered, perm=permutation)

    return gathered
463
+
464
+
465
class TFDebertaDisentangledSelfAttention(tf.keras.layers.Layer):
    """
    Disentangled self-attention module

    Parameters:
        config (`DebertaConfig`):
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*, for more details, please refer [`DebertaConfig`]

    """

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Single projection producing Q, K and V at once; it is bias-free because
        # separate q/v biases are added in `build` and applied in `call`.
        self.in_proj = tf.keras.layers.Dense(
            self.all_head_size * 3,
            kernel_initializer=get_initializer(config.initializer_range),
            name="in_proj",
            use_bias=False,
        )
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []

        self.relative_attention = getattr(config, "relative_attention", False)
        self.talking_head = getattr(config, "talking_head", False)

        if self.talking_head:
            # "Talking heads": linear mixing across heads before and after the softmax.
            self.head_logits_proj = tf.keras.layers.Dense(
                self.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                name="head_logits_proj",
                use_bias=False,
            )
            self.head_weights_proj = tf.keras.layers.Dense(
                self.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                name="head_weights_proj",
                use_bias=False,
            )

        self.softmax = TFDebertaXSoftmax(axis=-1)

        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="pos_dropout")
            # Projections for the disentangled attention biases (content->position
            # and position->content) are created only for the configured types.
            if "c2p" in self.pos_att_type:
                self.pos_proj = tf.keras.layers.Dense(
                    self.all_head_size,
                    kernel_initializer=get_initializer(config.initializer_range),
                    name="pos_proj",
                    use_bias=False,
                )
            if "p2c" in self.pos_att_type:
                self.pos_q_proj = tf.keras.layers.Dense(
                    self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="pos_q_proj"
                )

        self.dropout = TFDebertaStableDropout(config.attention_probs_dropout_prob, name="dropout")

    def build(self, input_shape):
        # Separate query/value biases, since `in_proj` itself has no bias.
        self.q_bias = self.add_weight(
            name="q_bias", shape=(self.all_head_size), initializer=tf.keras.initializers.Zeros()
        )
        self.v_bias = self.add_weight(
            name="v_bias", shape=(self.all_head_size), initializer=tf.keras.initializers.Zeros()
        )
        return super().build(input_shape)

    def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor:
        shape = shape_list(tensor)[:-1] + [self.num_attention_heads, -1]
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
        tensor = tf.reshape(tensor=tensor, shape=shape)

        # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
        return tf.transpose(tensor, perm=[0, 2, 1, 3])

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """
        Call the module

        Args:
            hidden_states (`tf.Tensor`):
                Input states to the module usually the output from previous layer, it will be the Q,K and V in
                *Attention(Q,K,V)*

            attention_mask (`tf.Tensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
                sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
                th token.

            output_attentions (`bool`, optional):
                Whether to also return the attention probability matrix.

            query_states (`tf.Tensor`, optional):
                The *Q* state in *Attention(Q,K,V)*.

            relative_pos (`tf.Tensor`):
                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
                values ranging in [*-max_relative_positions*, *max_relative_positions*].

            rel_embeddings (`tf.Tensor`):
                The embedding of relative distances. It's a tensor of shape [\\(2 \\times
                \\text{max_relative_positions}\\), *hidden_size*].


        """
        if query_states is None:
            # Fast path: Q, K and V all come from `hidden_states` via one fused projection.
            qp = self.in_proj(hidden_states)  # .split(self.all_head_size, dim=-1)
            query_layer, key_layer, value_layer = tf.split(
                self.transpose_for_scores(qp), num_or_size_splits=3, axis=-1
            )
        else:
            # Separate query stream: the fused in_proj weight must be re-split into
            # per-head q/k/v weight slices (the fused layout interleaves them per head).

            def linear(w, b, x):
                out = tf.matmul(x, w, transpose_b=True)
                if b is not None:
                    out += tf.transpose(b)
                return out

            ws = tf.split(
                tf.transpose(self.in_proj.weight[0]), num_or_size_splits=self.num_attention_heads * 3, axis=0
            )
            qkvw = tf.TensorArray(dtype=tf.float32, size=3)
            for k in tf.range(3):
                qkvw_inside = tf.TensorArray(dtype=tf.float32, size=self.num_attention_heads)
                for i in tf.range(self.num_attention_heads):
                    qkvw_inside = qkvw_inside.write(i, ws[i * 3 + k])
                qkvw = qkvw.write(k, qkvw_inside.concat())
            qkvb = [None] * 3

            # Q is computed from the query stream; K and V from the hidden states.
            q = linear(qkvw[0], qkvb[0], query_states)
            k = linear(qkvw[1], qkvb[1], hidden_states)
            v = linear(qkvw[2], qkvb[2], hidden_states)
            query_layer = self.transpose_for_scores(q)
            key_layer = self.transpose_for_scores(k)
            value_layer = self.transpose_for_scores(v)

        # Apply the separate q/v biases (the fused projection is bias-free).
        query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
        value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])

        rel_att = None
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # The scale accounts for the extra score terms added by each pos_att_type.
        scale_factor = 1 + len(self.pos_att_type)
        scale = math.sqrt(shape_list(query_layer)[-1] * scale_factor)
        query_layer = query_layer / scale

        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2]))
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings, training=training)
            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)

        if rel_att is not None:
            attention_scores = attention_scores + rel_att

        if self.talking_head:
            # Mix logits across heads (channels-last transpose so Dense acts on the head axis).
            attention_scores = tf.transpose(
                self.head_logits_proj(tf.transpose(attention_scores, [0, 2, 3, 1])), [0, 3, 1, 2]
            )

        attention_probs = self.softmax(attention_scores, attention_mask)
        attention_probs = self.dropout(attention_probs, training=training)
        if self.talking_head:
            attention_probs = tf.transpose(
                self.head_weights_proj(tf.transpose(attention_probs, [0, 2, 3, 1])), [0, 3, 1, 2]
            )

        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
        context_layer_shape = shape_list(context_layer)
        # Set the final dimension here explicitly.
        # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
        # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput
        # requires final input dimension to be defined
        new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
        context_layer = tf.reshape(context_layer, new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs

    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        """Compute the c2p/p2c relative-position attention bias added to the content scores."""
        if relative_pos is None:
            q = shape_list(query_layer)[-2]
            relative_pos = build_relative_position(q, shape_list(key_layer)[-2])
        # Normalize relative_pos to 4D [batch, heads, q_len, k_len].
        shape_list_pos = shape_list(relative_pos)
        if len(shape_list_pos) == 2:
            relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
        elif len(shape_list_pos) == 3:
            relative_pos = tf.expand_dims(relative_pos, 1)
        # bxhxqxk
        elif len(shape_list_pos) != 4:
            raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")

        # The attention span is capped at max_relative_positions.
        att_span = tf.cast(
            tf.minimum(
                tf.maximum(shape_list(query_layer)[-2], shape_list(key_layer)[-2]), self.max_relative_positions
            ),
            tf.int64,
        )
        # Slice the centered window [-att_span, att_span) out of the embedding table.
        rel_embeddings = tf.expand_dims(
            rel_embeddings[self.max_relative_positions - att_span : self.max_relative_positions + att_span, :], 0
        )

        score = 0

        # content->position
        if "c2p" in self.pos_att_type:
            pos_key_layer = self.pos_proj(rel_embeddings)
            pos_key_layer = self.transpose_for_scores(pos_key_layer)
            c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 1, 3, 2]))
            # Shift relative positions into valid gather indices [0, 2 * att_span).
            c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = torch_gather(c2p_att, c2p_dynamic_expand(c2p_pos, query_layer, relative_pos), -1)
            score += c2p_att

        # position->content
        if "p2c" in self.pos_att_type:
            pos_query_layer = self.pos_q_proj(rel_embeddings)
            pos_query_layer = self.transpose_for_scores(pos_query_layer)
            pos_query_layer /= tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=tf.float32))
            # When query and key lengths differ, p2c needs key-to-key relative positions.
            if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
                r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2])
            else:
                r_pos = relative_pos
            p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 1, 3, 2]))
            p2c_att = tf.transpose(
                torch_gather(p2c_att, p2c_dynamic_expand(p2c_pos, query_layer, key_layer), -1), [0, 1, 3, 2]
            )
            if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
                # Re-index rows to the query positions when lengths differ.
                pos_index = tf.expand_dims(relative_pos[:, :, :, 0], -1)
                p2c_att = torch_gather(p2c_att, pos_dynamic_expand(pos_index, p2c_att, key_layer), -2)
            score += p2c_att

        return score
714
+
715
+
716
class TFDebertaEmbeddings(tf.keras.layers.Layer):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        # Some checkpoints use an embedding size different from the hidden size;
        # fall back to hidden_size when the config does not define one.
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.hidden_size = config.hidden_size
        self.max_position_embeddings = config.max_position_embeddings
        self.position_biased_input = getattr(config, "position_biased_input", True)
        self.initializer_range = config.initializer_range
        if self.embedding_size != config.hidden_size:
            # Projects embeddings to the hidden size when the two sizes differ.
            self.embed_proj = tf.keras.layers.Dense(
                config.hidden_size,
                kernel_initializer=get_initializer(config.initializer_range),
                name="embed_proj",
                use_bias=False,
            )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")

    def build(self, input_shape: tf.TensorShape):
        """Create the word, token-type and position embedding weight tables."""
        with tf.name_scope("word_embeddings"):
            self.weight = self.add_weight(
                name="weight",
                shape=[self.config.vocab_size, self.embedding_size],
                initializer=get_initializer(self.initializer_range),
            )

        with tf.name_scope("token_type_embeddings"):
            if self.config.type_vocab_size > 0:
                self.token_type_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.config.type_vocab_size, self.embedding_size],
                    initializer=get_initializer(self.initializer_range),
                )
            else:
                self.token_type_embeddings = None

        with tf.name_scope("position_embeddings"):
            if self.position_biased_input:
                self.position_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.max_position_embeddings, self.hidden_size],
                    initializer=get_initializer(self.initializer_range),
                )
            else:
                self.position_embeddings = None

        super().build(input_shape)

    def call(
        self,
        input_ids: tf.Tensor = None,
        position_ids: tf.Tensor = None,
        token_type_ids: tf.Tensor = None,
        inputs_embeds: tf.Tensor = None,
        mask: tf.Tensor = None,
        training: bool = False,
    ) -> tf.Tensor:
        """
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("Need to provide either `input_ids` or `input_embeds`.")

        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)

        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)

        final_embeddings = inputs_embeds
        if self.position_biased_input:
            position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
            final_embeddings += position_embeds
        if self.config.type_vocab_size > 0:
            token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
            final_embeddings += token_type_embeds

        if self.embedding_size != self.hidden_size:
            final_embeddings = self.embed_proj(final_embeddings)

        final_embeddings = self.LayerNorm(final_embeddings)

        if mask is not None:
            # Zero out embeddings at masked positions. A 4D attention mask is first
            # squeezed back to (batch, seq_len) before broadcasting over the hidden dim.
            if len(shape_list(mask)) != len(shape_list(final_embeddings)):
                if len(shape_list(mask)) == 4:
                    mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
                mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)

            final_embeddings = final_embeddings * mask

        final_embeddings = self.dropout(final_embeddings, training=training)

        return final_embeddings
class TFDebertaPredictionHeadTransform(tf.keras.layers.Layer):
    """Dense + activation + LayerNorm transform applied to hidden states before the MLM decoder."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)

        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.dense = tf.keras.layers.Dense(
            units=self.embedding_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )

        # hidden_act may be given either as a string name or directly as a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        """Apply dense projection, activation and layer norm to `hidden_states`."""
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)

        return hidden_states
class TFDebertaLMPredictionHead(tf.keras.layers.Layer):
    """Masked-LM prediction head decoding hidden states into vocabulary logits.

    The output projection reuses (ties) the input word-embedding matrix; only a
    per-token output bias is trained in addition.
    """

    def __init__(self, config: DebertaConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.transform = TFDebertaPredictionHeadTransform(config, name="transform")

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings

    def build(self, input_shape: tf.TensorShape):
        """Create the output-only bias over the vocabulary."""
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")

        super().build(input_shape)

    def get_output_embeddings(self) -> tf.keras.layers.Layer:
        return self.input_embeddings

    def set_output_embeddings(self, value: tf.Variable):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self) -> Dict[str, tf.Variable]:
        return {"bias": self.bias}

    def set_bias(self, value: tf.Variable):
        self.bias = value["bias"]
        self.config.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        """Return vocabulary logits of shape (batch, seq_len, vocab_size)."""
        hidden_states = self.transform(hidden_states=hidden_states)
        seq_length = shape_list(hidden_states)[1]
        # Flatten to 2D for the tied-embedding matmul, then restore (batch, seq, vocab).
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)

        return hidden_states
class TFDebertaOnlyMLMHead(tf.keras.layers.Layer):
    """Thin wrapper exposing the MLM prediction head under the `predictions` name."""

    def __init__(self, config: DebertaConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.predictions = TFDebertaLMPredictionHead(config, input_embeddings, name="predictions")

    def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
        """Return MLM logits for `sequence_output`."""
        prediction_scores = self.predictions(hidden_states=sequence_output)

        return prediction_scores
# @keras_serializable  # NOTE(review): decorator is commented out here; upstream
# transformers applies it to the main layer — confirm whether this is intentional.
class TFDebertaMainLayer(tf.keras.layers.Layer):
    """Core DeBERTa layer combining the embeddings and the disentangled-attention encoder."""

    config_class = DebertaConfig

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)

        self.config = config

        self.embeddings = TFDebertaEmbeddings(config, name="embeddings")
        self.encoder = TFDebertaEncoder(config, name="encoder")

    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        return self.embeddings

    def set_input_embeddings(self, value: tf.Variable):
        self.embeddings.weight = value
        self.embeddings.vocab_size = shape_list(value)[0]

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        """Run embeddings + encoder; returns a `TFBaseModelOutput` or a tuple."""
        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # Default to attending everywhere / a single segment when not given.
        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)

        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            mask=attention_mask,
            training=training,
        )

        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return TFBaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class TFDebertaPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DebertaConfig
    base_model_prefix = "deberta"
# Shared docstring fragments injected into the model classes by the
# `add_start_docstrings*` decorators below. Text kept verbatim from upstream.
DEBERTA_START_DOCSTRING = r"""
    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build
    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
    improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.

    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

DEBERTA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput``] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
class TFDebertaModel(TFDebertaPreTrainedModel):
    """Bare DeBERTa encoder; delegates everything to `TFDebertaMainLayer`."""

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.deberta = TFDebertaMainLayer(config, name="deberta")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # Pure pass-through to the main layer.
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLoss):
    """DeBERTa encoder topped with a tied-embedding masked-LM head."""

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        # MLM needs bi-directional attention; warn if the config says decoder.
        if config.is_decoder:
            logger.warning(
                "If you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.deberta = TFDebertaMainLayer(config, name="deberta")
        self.mlm = TFDebertaOnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")

    def get_lm_head(self) -> tf.keras.layers.Layer:
        return self.mlm.predictions

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFMaskedLMOutput(
            loss=loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceClassificationLoss):
    """DeBERTa encoder + context pooler + dropout + linear classifier."""

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels

        self.deberta = TFDebertaMainLayer(config, name="deberta")
        self.pooler = TFDebertaContextPooler(config, name="pooler")

        # Classifier dropout falls back to the generic hidden dropout when unset.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = TFDebertaStableDropout(drop_out, name="cls_dropout")
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="classifier",
        )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        pooled_output = self.pooler(sequence_output, training=training)
        pooled_output = self.dropout(pooled_output, training=training)
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[1:]

            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassificationLoss):
    """DeBERTa encoder + dropout + per-token linear classifier."""

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels

        self.deberta = TFDebertaMainLayer(config, name="deberta")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output, training=training)
        logits = self.classifier(inputs=sequence_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DEBERTA_START_DOCSTRING,
)
class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnsweringLoss):
    """DeBERTa encoder + a linear layer producing span start/end logits."""

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels

        self.deberta = TFDebertaMainLayer(config, name="deberta")
        self.qa_outputs = tf.keras.layers.Dense(
            units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        start_positions: np.ndarray | tf.Tensor | None = None,
        end_positions: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        r"""
        start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        # One dense layer emits both span logits; split on the last axis.
        logits = self.qa_outputs(inputs=sequence_output)
        start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
        start_logits = tf.squeeze(input=start_logits, axis=-1)
        end_logits = tf.squeeze(input=end_logits, axis=-1)
        loss = None

        if start_positions is not None and end_positions is not None:
            labels = {"start_position": start_positions}
            labels["end_position"] = end_positions
            loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
mgm/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Fast Tokenization class for model DeBERTa."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import pre_tokenizers
21
+
22
+ from ...tokenization_utils_base import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import logging
25
+ from .tokenization_deberta import DebertaTokenizer
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
31
+
32
+ PRETRAINED_VOCAB_FILES_MAP = {
33
+ "vocab_file": {
34
+ "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/vocab.json",
35
+ "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/vocab.json",
36
+ "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/vocab.json",
37
+ "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/vocab.json",
38
+ "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/vocab.json",
39
+ "microsoft/deberta-xlarge-mnli": (
40
+ "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/vocab.json"
41
+ ),
42
+ },
43
+ "merges_file": {
44
+ "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/merges.txt",
45
+ "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/merges.txt",
46
+ "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/merges.txt",
47
+ "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/merges.txt",
48
+ "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/merges.txt",
49
+ "microsoft/deberta-xlarge-mnli": (
50
+ "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/merges.txt"
51
+ ),
52
+ },
53
+ }
54
+
55
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
56
+ "microsoft/deberta-base": 512,
57
+ "microsoft/deberta-large": 512,
58
+ "microsoft/deberta-xlarge": 512,
59
+ "microsoft/deberta-base-mnli": 512,
60
+ "microsoft/deberta-large-mnli": 512,
61
+ "microsoft/deberta-xlarge-mnli": 512,
62
+ }
63
+
64
+ PRETRAINED_INIT_CONFIGURATION = {
65
+ "microsoft/deberta-base": {"do_lower_case": False},
66
+ "microsoft/deberta-large": {"do_lower_case": False},
67
+ }
68
+
69
+
70
class DebertaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" DeBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
    Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import DebertaTokenizerFast

    >>> tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base")
    >>> tokenizer("Hello world")["input_ids"]
    [1, 31414, 232, 2]

    >>> tokenizer(" Hello world")["input_ids"]
    [1, 20920, 232, 2]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
    the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            Path to the vocabulary file.
        merges_file (`str`, *optional*):
            Path to the merges file.
        tokenizer_file (`str`, *optional*):
            The path to a tokenizer file to use instead of the vocab file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (Deberta tokenizer detect beginning of words by the preceding space).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
    slow_tokenizer_class = DebertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="[CLS]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        cls_token="[CLS]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        mask_token="[MASK]",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # If the serialized pre-tokenizer disagrees with the requested `add_prefix_space`,
        # rebuild it with the requested value so encoding matches the constructor argument.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
        having been set.

        Deberta tokenizer has a special mask token to be used in the fill-mask pipeline. The mask token will greedily
        comprise the space before the *[MASK]*.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """
        Overriding the default behavior of the mask token to have it eat the space before it.
        """
        # Mask token behave like a normal word, i.e. include the space before it
        # So we set lstrip to True
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A DeBERTa sequence has the following format:

        - single sequence: [CLS] X [SEP]
        - pair of sequences: [CLS] A [SEP] B [SEP]

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Fix: input validation must not rely on `assert` (stripped under `python -O`
        # and raises AssertionError instead of the conventional ValueError).
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Same validation as `_batch_encode_plus`; see the note there.
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files to `save_directory` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
mgm/lib/python3.10/site-packages/transformers/models/deta/__init__.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# The configuration is importable unconditionally; everything else is gated on
# optional dependencies and only registered when they are available.
_import_structure = {"configuration_deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"]}

# Image processor: requires the vision extras.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_deta"] = ["DetaImageProcessor"]

# Modeling code: requires PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deta"] = [
        "DETA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DetaForObjectDetection",
        "DetaModel",
        "DetaPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type-checkers see the real imports, mirroring `_import_structure`.
    from .configuration_deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_deta import DetaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deta import (
            DETA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DetaForObjectDetection,
            DetaModel,
            DetaPreTrainedModel,
        )

else:
    import sys

    # At runtime the module is replaced with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/configuration_deta.cpython-310.pyc ADDED
Binary file (9.7 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_resnet_to_pytorch.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_swin_to_pytorch.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/image_processing_deta.cpython-310.pyc ADDED
Binary file (37.4 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/modeling_deta.cpython-310.pyc ADDED
Binary file (96.5 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/deta/configuration_deta.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DETA model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+ from ..auto import CONFIG_MAPPING
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
26
+ "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
27
+ }
28
+
29
+
30
class DetaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the DETA
    [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
            The configuration of the backbone model.
        num_queries (`int`, *optional*, defaults to 900):
            Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can
            detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        class_cost (`float`, *optional*, defaults to 1):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.1):
            Relative classification weight of the 'no-object' class in the object detection loss.
        num_feature_levels (`int`, *optional*, defaults to 5):
            The number of input feature levels.
        encoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the encoder.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        two_stage (`bool`, *optional*, defaults to `True`):
            Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
            DETA, which are further fed into the decoder for iterative bounding box refinement.
        two_stage_num_proposals (`int`, *optional*, defaults to 300):
            The number of region proposals to be generated, in case `two_stage` is set to `True`.
        with_box_refine (`bool`, *optional*, defaults to `True`):
            Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
            based on the predictions from the previous layer.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.

    Examples:

    ```python
    >>> from transformers import DetaConfig, DetaModel

    >>> # Initializing a DETA SenseTime/deformable-detr style configuration
    >>> configuration = DetaConfig()

    >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
    >>> model = DetaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```

    Raises:
        ValueError: If `two_stage` is enabled without `with_box_refine`.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        # Fail fast on an inconsistent combination before building anything.
        if two_stage and not with_box_refine:
            raise ValueError("If two_stage is True, with_box_refine must be True.")

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Copy before popping so the caller's dict is not mutated.
            backbone_config = dict(backbone_config)
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias kept for API compatibility with other configs.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias kept for API compatibility with other configs.
        return self.d_model
mgm/lib/python3.10/site-packages/transformers/models/deta/convert_deta_resnet_to_pytorch.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DETA checkpoints from the original repository.
16
+
17
+ URL: https://github.com/jozhang97/DETA/tree/master"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from pathlib import Path
23
+
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
27
+ from PIL import Image
28
+
29
+ from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor
30
+ from transformers.utils import logging
31
+
32
+
33
+ logging.set_verbosity_info()
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
def get_deta_config():
    """Build the `DetaConfig` for the DETA ResNet checkpoint, with COCO detection labels attached.

    Returns:
        `DetaConfig`: configuration with 91 COCO classes and `id2label`/`label2id` populated from the
        `huggingface/label-files` dataset repo.
    """
    config = DetaConfig(
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    # Fix: close the label file deterministically (the original `json.load(open(...))`
    # leaked the file handle) and decode it explicitly as UTF-8.
    label_path = cached_download(hf_hub_url(repo_id, filename, repo_type="dataset"))
    with open(label_path, "r", encoding="utf-8") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
58
+
59
+
60
+ # here we list all keys to be renamed (original name on the left, our name on the right)
61
+ def create_rename_keys(config):
62
+ rename_keys = []
63
+
64
+ # stem
65
+ # fmt: off
66
+ rename_keys.append(("backbone.0.body.conv1.weight", "model.backbone.model.embedder.embedder.convolution.weight"))
67
+ rename_keys.append(("backbone.0.body.bn1.weight", "model.backbone.model.embedder.embedder.normalization.weight"))
68
+ rename_keys.append(("backbone.0.body.bn1.bias", "model.backbone.model.embedder.embedder.normalization.bias"))
69
+ rename_keys.append(("backbone.0.body.bn1.running_mean", "model.backbone.model.embedder.embedder.normalization.running_mean"))
70
+ rename_keys.append(("backbone.0.body.bn1.running_var", "model.backbone.model.embedder.embedder.normalization.running_var"))
71
+ # stages
72
+ for stage_idx in range(len(config.backbone_config.depths)):
73
+ for layer_idx in range(config.backbone_config.depths[stage_idx]):
74
+ # shortcut
75
+ if layer_idx == 0:
76
+ rename_keys.append(
77
+ (
78
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
79
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
80
+ )
81
+ )
82
+ rename_keys.append(
83
+ (
84
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
85
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
86
+ )
87
+ )
88
+ rename_keys.append(
89
+ (
90
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
91
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
92
+ )
93
+ )
94
+ rename_keys.append(
95
+ (
96
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
97
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
98
+ )
99
+ )
100
+ rename_keys.append(
101
+ (
102
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
103
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
104
+ )
105
+ )
106
+ # 3 convs
107
+ for i in range(3):
108
+ rename_keys.append(
109
+ (
110
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
111
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
112
+ )
113
+ )
114
+ rename_keys.append(
115
+ (
116
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
117
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
118
+ )
119
+ )
120
+ rename_keys.append(
121
+ (
122
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
123
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
124
+ )
125
+ )
126
+ rename_keys.append(
127
+ (
128
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
129
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
130
+ )
131
+ )
132
+ rename_keys.append(
133
+ (
134
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
135
+ f"model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
136
+ )
137
+ )
138
+ # transformer encoder
139
+ for i in range(config.encoder_layers):
140
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
141
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
142
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
143
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
144
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
145
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
146
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
147
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
148
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
149
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
150
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
151
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
152
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
153
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
154
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
155
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
156
+
157
+ # transformer decoder
158
+ for i in range(config.decoder_layers):
159
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
160
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
161
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
162
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
163
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
164
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
165
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
166
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
167
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
168
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
169
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
170
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
171
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
172
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
173
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
174
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
175
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
176
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
177
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
178
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
179
+
180
+ # fmt: on
181
+
182
+ return rename_keys
183
+
184
+
185
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new`` in-place."""
    dct[new] = dct.pop(old)
188
+
189
+
190
def read_in_decoder_q_k_v(state_dict, config):
    """Split each decoder self-attention ``in_proj`` tensor into q/k/v entries.

    The original checkpoint stores query, key and value projections stacked in
    a single matrix/bias; HF Transformers expects three separate projections.
    Mutates ``state_dict`` in place.
    """
    dim = config.d_model
    for layer in range(config.decoder_layers):
        # pop the fused projection of this decoder layer
        fused_weight = state_dict.pop(f"transformer.decoder.layers.{layer}.self_attn.in_proj_weight")
        fused_bias = state_dict.pop(f"transformer.decoder.layers.{layer}.self_attn.in_proj_bias")
        dest = f"model.decoder.layers.{layer}.self_attn"
        # slices are stacked in q, k, v order along the first axis
        state_dict[f"{dest}.q_proj.weight"] = fused_weight[:dim, :]
        state_dict[f"{dest}.q_proj.bias"] = fused_bias[:dim]
        state_dict[f"{dest}.k_proj.weight"] = fused_weight[dim : dim * 2, :]
        state_dict[f"{dest}.k_proj.bias"] = fused_bias[dim : dim * 2]
        state_dict[f"{dest}.v_proj.weight"] = fused_weight[-dim:, :]
        state_dict[f"{dest}.v_proj.bias"] = fused_bias[-dim:]
206
+
207
+
208
# We will verify our results on an image of cute cats
def prepare_img():
    """Fetch the standard COCO cats test image and return it as a PIL image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)
214
+
215
+
216
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our DETA structure.

    Downloads the original checkpoint from the hub, renames its parameters to
    the HF layout, verifies the converted model's outputs on a test image, and
    optionally saves and/or pushes the result.

    Args:
        model_name (str): one of "deta-resnet-50" / "deta-resnet-50-24-epochs".
        pytorch_dump_folder_path (str or None): where to save the converted
            model and processor; skipped when None/empty.
        push_to_hub (bool): whether to push the converted artifacts to the hub.

    Raises:
        ValueError: if `model_name` is not supported.
        AssertionError: if the converted model's outputs do not match the
            reference values from the original implementation.
    """

    # load config
    config = get_deta_config()

    # load original state dict
    if model_name == "deta-resnet-50":
        filename = "adet_checkpoint0011.pth"
    elif model_name == "deta-resnet-50-24-epochs":
        filename = "adet_2x_checkpoint0023.pth"
    else:
        raise ValueError(f"Model name {model_name} not supported")
    checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename=filename)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes (iterate over a copy since we mutate the dict)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits against reference values from the original implementation
    if model_name == "deta-resnet-50":
        expected_logits = torch.tensor(
            [[-7.3978, -2.5406, -4.1668], [-8.2684, -3.9933, -3.8096], [-7.0515, -3.7973, -5.8516]]
        )
        expected_boxes = torch.tensor([[0.5043, 0.4973, 0.9998], [0.2542, 0.5489, 0.4748], [0.5490, 0.2765, 0.0570]])
    elif model_name == "deta-resnet-50-24-epochs":
        expected_logits = torch.tensor(
            [[-7.1688, -2.4857, -4.8669], [-7.8630, -3.8154, -4.2674], [-7.2730, -4.1865, -5.5323]]
        )
        expected_boxes = torch.tensor([[0.5021, 0.4971, 0.9994], [0.2546, 0.5486, 0.4731], [0.1686, 0.1986, 0.2142]])

    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        # parents=True so a nested dump path does not fail with FileNotFoundError
        Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
298
+
299
+
300
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-resnet-50",
        choices=["deta-resnet-50", "deta-resnet-50-24-epochs"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
mgm/lib/python3.10/site-packages/transformers/models/deta/convert_deta_swin_to_pytorch.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert DETA checkpoints from the original repository.
16
+
17
+ URL: https://github.com/jozhang97/DETA/tree/master"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from pathlib import Path
23
+
24
+ import requests
25
+ import torch
26
+ from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
27
+ from PIL import Image
28
+
29
+ from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
30
+ from transformers.utils import logging
31
+
32
+
33
# Emit info-level progress messages while the conversion script runs.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
35
+
36
+
37
def get_deta_config(model_name):
    """Build the DETA (Swin-Large backbone) configuration used for conversion.

    Args:
        model_name (str): checkpoint name; names containing "o365" get
            Objects365 labels, all others get COCO detection labels.

    Returns:
        DetaConfig: configuration with the Swin backbone and
        ``id2label`` / ``label2id`` mappings attached.
    """
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    # `cached_download` was removed from recent huggingface_hub releases;
    # `hf_hub_download` resolves and caches the file in one call. Using a
    # context manager also closes the file handle (the original leaked it).
    label_path = hf_hub_download(repo_id, filename, repo_type="dataset")
    with open(label_path, "r") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
73
+
74
+
75
+ # here we list all keys to be renamed (original name on the left, our name on the right)
76
+ def create_rename_keys(config):
77
+ rename_keys = []
78
+
79
+ # stem
80
+ # fmt: off
81
+ rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
82
+ rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
83
+ rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
84
+ rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
85
+ # stages
86
+ for i in range(len(config.backbone_config.depths)):
87
+ for j in range(config.backbone_config.depths[i]):
88
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
89
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
90
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
91
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
92
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
93
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
94
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
95
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
96
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
97
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
98
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
99
+ rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
100
+
101
+ if i < 3:
102
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
103
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
104
+ rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))
105
+
106
+ rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
107
+ rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
108
+ rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
109
+ rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
110
+ rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
111
+ rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))
112
+
113
+ # transformer encoder
114
+ for i in range(config.encoder_layers):
115
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
116
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
117
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
118
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
119
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
120
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
121
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
122
+ rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
123
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
124
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
125
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
126
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
127
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
128
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
129
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
130
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
131
+
132
+ # transformer decoder
133
+ for i in range(config.decoder_layers):
134
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
135
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
136
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
137
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
138
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
139
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
140
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
141
+ rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
142
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
143
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
144
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
145
+ rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
146
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
147
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
148
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
149
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
150
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
151
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
152
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
153
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
154
+
155
+ # fmt: on
156
+
157
+ return rename_keys
158
+
159
+
160
def rename_key(dct, old, new):
    """Re-key an entry: remove it under ``old`` and store it under ``new``."""
    value = dct.pop(old)
    dct[new] = value
163
+
164
+
165
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each Swin attention ``qkv`` projection into separate q/k/v entries.

    The original checkpoint fuses query, key and value into a single matrix and
    bias per block; HF Transformers expects three separate projections.
    Mutates ``state_dict`` in place.
    """
    dims = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i, dim in enumerate(dims):
        for j in range(backbone_config.depths[i]):
            src = f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv"
            dest = f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self"
            # pop the fused projection of this block
            qkv_weight = state_dict.pop(f"{src}.weight")
            qkv_bias = state_dict.pop(f"{src}.bias")
            # slices are stacked in q, k, v order along the first axis
            state_dict[f"{dest}.query.weight"] = qkv_weight[:dim, :]
            state_dict[f"{dest}.query.bias"] = qkv_bias[:dim]
            state_dict[f"{dest}.key.weight"] = qkv_weight[dim : dim * 2, :]
            state_dict[f"{dest}.key.bias"] = qkv_bias[dim : dim * 2]
            state_dict[f"{dest}.value.weight"] = qkv_weight[-dim:, :]
            state_dict[f"{dest}.value.bias"] = qkv_bias[-dim:]
189
+
190
+
191
def read_in_decoder_q_k_v(state_dict, config):
    """Split each decoder self-attention ``in_proj`` tensor into q/k/v entries.

    Query, key and value are stacked along the first axis of a single fused
    projection in the original checkpoint. Mutates ``state_dict`` in place.
    """
    hidden = config.d_model
    for idx in range(config.decoder_layers):
        src = f"transformer.decoder.layers.{idx}.self_attn"
        dest = f"model.decoder.layers.{idx}.self_attn"
        fused_w = state_dict.pop(f"{src}.in_proj_weight")
        fused_b = state_dict.pop(f"{src}.in_proj_bias")
        # q / k / v occupy consecutive `hidden`-sized chunks
        state_dict[f"{dest}.q_proj.weight"] = fused_w[:hidden, :]
        state_dict[f"{dest}.q_proj.bias"] = fused_b[:hidden]
        state_dict[f"{dest}.k_proj.weight"] = fused_w[hidden : hidden * 2, :]
        state_dict[f"{dest}.k_proj.bias"] = fused_b[hidden : hidden * 2]
        state_dict[f"{dest}.v_proj.weight"] = fused_w[-hidden:, :]
        state_dict[f"{dest}.v_proj.bias"] = fused_b[-hidden:]
207
+
208
+
209
# We will verify our results on an image of cute cats
def prepare_img():
    """Download the standard COCO cats validation image as a PIL image."""
    response = requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True)
    return Image.open(response.raw)
215
+
216
+
217
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our DETA structure.

    Downloads the original Swin-backbone checkpoint from the hub, renames its
    parameters to the HF layout, verifies the converted model's outputs on a
    test image, and optionally saves and/or pushes the result.

    Args:
        model_name (str): one of "deta-swin-large" / "deta-swin-large-o365".
        pytorch_dump_folder_path (str or None): where to save the converted
            model and processor; skipped when None/empty.
        push_to_hub (bool): whether to push the converted artifacts to the hub.

    Raises:
        ValueError: if `model_name` is not supported.
        AssertionError: if the converted model's outputs do not match the
            reference values from the original implementation.
    """

    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict (debug: list every parameter and its shape)
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes (iterate over a copy since we mutate the dict)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits against reference values from the original implementation
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        # parents=True so a nested dump path does not fail with FileNotFoundError
        Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
305
+
306
+
307
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
mgm/lib/python3.10/site-packages/transformers/models/deta/image_processing_deta.py ADDED
@@ -0,0 +1,1095 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Deformable DETR."""
16
+
17
+ import pathlib
18
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...feature_extraction_utils import BatchFeature
23
+ from ...image_processing_utils import BaseImageProcessor, get_size_dict
24
+ from ...image_transforms import (
25
+ PaddingMode,
26
+ center_to_corners_format,
27
+ corners_to_center_format,
28
+ pad,
29
+ rescale,
30
+ resize,
31
+ rgb_to_id,
32
+ to_channel_dimension_format,
33
+ )
34
+ from ...image_utils import (
35
+ IMAGENET_DEFAULT_MEAN,
36
+ IMAGENET_DEFAULT_STD,
37
+ ChannelDimension,
38
+ ImageInput,
39
+ PILImageResampling,
40
+ get_image_size,
41
+ infer_channel_dimension_format,
42
+ is_batched,
43
+ is_scaled_image,
44
+ to_numpy_array,
45
+ valid_coco_detection_annotations,
46
+ valid_coco_panoptic_annotations,
47
+ valid_images,
48
+ )
49
+ from ...utils import (
50
+ is_flax_available,
51
+ is_jax_tensor,
52
+ is_tf_available,
53
+ is_tf_tensor,
54
+ is_torch_available,
55
+ is_torch_tensor,
56
+ is_torchvision_available,
57
+ is_vision_available,
58
+ logging,
59
+ )
60
+ from ...utils.generic import ExplicitEnum, TensorType
61
+
62
+
63
+ if is_torch_available():
64
+ import torch
65
+
66
+
67
+ if is_torchvision_available():
68
+ from torchvision.ops.boxes import batched_nms
69
+
70
+ if is_vision_available():
71
+ import PIL
72
+
73
+
74
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
75
+
76
+
77
+ class AnnotionFormat(ExplicitEnum):
78
+ COCO_DETECTION = "coco_detection"
79
+ COCO_PANOPTIC = "coco_panoptic"
80
+
81
+
82
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotionFormat.COCO_DETECTION, AnnotionFormat.COCO_PANOPTIC)
83
+
84
+
85
+ # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
86
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
87
+ """
88
+ Computes the output image size given the input image size and the desired output size.
89
+
90
+ Args:
91
+ image_size (`Tuple[int, int]`):
92
+ The input image size.
93
+ size (`int`):
94
+ The desired output size.
95
+ max_size (`int`, *optional*):
96
+ The maximum allowed output size.
97
+ """
98
+ height, width = image_size
99
+ if max_size is not None:
100
+ min_original_size = float(min((height, width)))
101
+ max_original_size = float(max((height, width)))
102
+ if max_original_size / min_original_size * size > max_size:
103
+ size = int(round(max_size * min_original_size / max_original_size))
104
+
105
+ if (height <= width and height == size) or (width <= height and width == size):
106
+ return height, width
107
+
108
+ if width < height:
109
+ ow = size
110
+ oh = int(size * height / width)
111
+ else:
112
+ oh = size
113
+ ow = int(size * width / height)
114
+ return (oh, ow)
115
+
116
+
117
+ # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
118
+ def get_resize_output_image_size(
119
+ input_image: np.ndarray,
120
+ size: Union[int, Tuple[int, int], List[int]],
121
+ max_size: Optional[int] = None,
122
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
123
+ ) -> Tuple[int, int]:
124
+ """
125
+ Computes the output image size given the input image size and the desired output size. If the desired output size
126
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
127
+ image size is computed by keeping the aspect ratio of the input image size.
128
+
129
+ Args:
130
+ input_image (`np.ndarray`):
131
+ The image to resize.
132
+ size (`int` or `Tuple[int, int]` or `List[int]`):
133
+ The desired output size.
134
+ max_size (`int`, *optional*):
135
+ The maximum allowed output size.
136
+ input_data_format (`ChannelDimension` or `str`, *optional*):
137
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
138
+ """
139
+ image_size = get_image_size(input_image, input_data_format)
140
+ if isinstance(size, (list, tuple)):
141
+ return size
142
+
143
+ return get_size_with_aspect_ratio(image_size, size, max_size)
144
+
145
+
146
+ # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
147
+ def get_numpy_to_framework_fn(arr) -> Callable:
148
+ """
149
+ Returns a function that converts a numpy array to the framework of the input array.
150
+
151
+ Args:
152
+ arr (`np.ndarray`): The array to convert.
153
+ """
154
+ if isinstance(arr, np.ndarray):
155
+ return np.array
156
+ if is_tf_available() and is_tf_tensor(arr):
157
+ import tensorflow as tf
158
+
159
+ return tf.convert_to_tensor
160
+ if is_torch_available() and is_torch_tensor(arr):
161
+ import torch
162
+
163
+ return torch.tensor
164
+ if is_flax_available() and is_jax_tensor(arr):
165
+ import jax.numpy as jnp
166
+
167
+ return jnp.array
168
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
169
+
170
+
171
+ # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
172
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
173
+ """
174
+ Squeezes an array, but only if the axis specified has dim 1.
175
+ """
176
+ if axis is None:
177
+ return arr.squeeze()
178
+
179
+ try:
180
+ return arr.squeeze(axis=axis)
181
+ except ValueError:
182
+ return arr
183
+
184
+
185
+ # Copied from transformers.models.detr.image_processing_detr.normalize_annotation
186
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
187
+ image_height, image_width = image_size
188
+ norm_annotation = {}
189
+ for key, value in annotation.items():
190
+ if key == "boxes":
191
+ boxes = value
192
+ boxes = corners_to_center_format(boxes)
193
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
194
+ norm_annotation[key] = boxes
195
+ else:
196
+ norm_annotation[key] = value
197
+ return norm_annotation
198
+
199
+
200
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
201
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
202
+ """
203
+ Return the maximum value across all indices of an iterable of values.
204
+ """
205
+ return [max(values_i) for values_i in zip(*values)]
206
+
207
+
208
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
209
+ def get_max_height_width(
210
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
211
+ ) -> List[int]:
212
+ """
213
+ Get the maximum height and width across all images in a batch.
214
+ """
215
+ if input_data_format is None:
216
+ input_data_format = infer_channel_dimension_format(images[0])
217
+
218
+ if input_data_format == ChannelDimension.FIRST:
219
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
220
+ elif input_data_format == ChannelDimension.LAST:
221
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
222
+ else:
223
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
224
+ return (max_height, max_width)
225
+
226
+
227
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
228
+ def make_pixel_mask(
229
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
230
+ ) -> np.ndarray:
231
+ """
232
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
233
+
234
+ Args:
235
+ image (`np.ndarray`):
236
+ Image to make the pixel mask for.
237
+ output_size (`Tuple[int, int]`):
238
+ Output size of the mask.
239
+ """
240
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
241
+ mask = np.zeros(output_size, dtype=np.int64)
242
+ mask[:input_height, :input_width] = 1
243
+ return mask
244
+
245
+
246
+ # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
247
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
248
+ """
249
+ Convert a COCO polygon annotation to a mask.
250
+
251
+ Args:
252
+ segmentations (`List[List[float]]`):
253
+ List of polygons, each polygon represented by a list of x-y coordinates.
254
+ height (`int`):
255
+ Height of the mask.
256
+ width (`int`):
257
+ Width of the mask.
258
+ """
259
+ try:
260
+ from pycocotools import mask as coco_mask
261
+ except ImportError:
262
+ raise ImportError("Pycocotools is not installed in your environment.")
263
+
264
+ masks = []
265
+ for polygons in segmentations:
266
+ rles = coco_mask.frPyObjects(polygons, height, width)
267
+ mask = coco_mask.decode(rles)
268
+ if len(mask.shape) < 3:
269
+ mask = mask[..., None]
270
+ mask = np.asarray(mask, dtype=np.uint8)
271
+ mask = np.any(mask, axis=2)
272
+ masks.append(mask)
273
+ if masks:
274
+ masks = np.stack(masks, axis=0)
275
+ else:
276
+ masks = np.zeros((0, height, width), dtype=np.uint8)
277
+
278
+ return masks
279
+
280
+
281
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DETA
282
+ def prepare_coco_detection_annotation(
283
+ image,
284
+ target,
285
+ return_segmentation_masks: bool = False,
286
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
287
+ ):
288
+ """
289
+ Convert the target in COCO format into the format expected by DETA.
290
+ """
291
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
292
+
293
+ image_id = target["image_id"]
294
+ image_id = np.asarray([image_id], dtype=np.int64)
295
+
296
+ # Get all COCO annotations for the given image.
297
+ annotations = target["annotations"]
298
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
299
+
300
+ classes = [obj["category_id"] for obj in annotations]
301
+ classes = np.asarray(classes, dtype=np.int64)
302
+
303
+ # for conversion to coco api
304
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
305
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
306
+
307
+ boxes = [obj["bbox"] for obj in annotations]
308
+ # guard against no boxes via resizing
309
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
310
+ boxes[:, 2:] += boxes[:, :2]
311
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
312
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
313
+
314
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
315
+
316
+ new_target = {}
317
+ new_target["image_id"] = image_id
318
+ new_target["class_labels"] = classes[keep]
319
+ new_target["boxes"] = boxes[keep]
320
+ new_target["area"] = area[keep]
321
+ new_target["iscrowd"] = iscrowd[keep]
322
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
323
+
324
+ if annotations and "keypoints" in annotations[0]:
325
+ keypoints = [obj["keypoints"] for obj in annotations]
326
+ # Converting the filtered keypoints list to a numpy array
327
+ keypoints = np.asarray(keypoints, dtype=np.float32)
328
+ # Apply the keep mask here to filter the relevant annotations
329
+ keypoints = keypoints[keep]
330
+ num_keypoints = keypoints.shape[0]
331
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
332
+ new_target["keypoints"] = keypoints
333
+
334
+ if return_segmentation_masks:
335
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
336
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
337
+ new_target["masks"] = masks[keep]
338
+
339
+ return new_target
340
+
341
+
342
+ # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
343
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
344
+ """
345
+ Compute the bounding boxes around the provided panoptic segmentation masks.
346
+
347
+ Args:
348
+ masks: masks in format `[number_masks, height, width]` where N is the number of masks
349
+
350
+ Returns:
351
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
352
+ """
353
+ if masks.size == 0:
354
+ return np.zeros((0, 4))
355
+
356
+ h, w = masks.shape[-2:]
357
+ y = np.arange(0, h, dtype=np.float32)
358
+ x = np.arange(0, w, dtype=np.float32)
359
+ # see https://github.com/pytorch/pytorch/issues/50276
360
+ y, x = np.meshgrid(y, x, indexing="ij")
361
+
362
+ x_mask = masks * np.expand_dims(x, axis=0)
363
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
364
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
365
+ x_min = x.filled(fill_value=1e8)
366
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
367
+
368
+ y_mask = masks * np.expand_dims(y, axis=0)
369
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
370
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
371
+ y_min = y.filled(fill_value=1e8)
372
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
373
+
374
+ return np.stack([x_min, y_min, x_max, y_max], 1)
375
+
376
+
377
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DETA
378
+ def prepare_coco_panoptic_annotation(
379
+ image: np.ndarray,
380
+ target: Dict,
381
+ masks_path: Union[str, pathlib.Path],
382
+ return_masks: bool = True,
383
+ input_data_format: Union[ChannelDimension, str] = None,
384
+ ) -> Dict:
385
+ """
386
+ Prepare a coco panoptic annotation for DETA.
387
+ """
388
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
389
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
390
+
391
+ new_target = {}
392
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
393
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
394
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
395
+
396
+ if "segments_info" in target:
397
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
398
+ masks = rgb_to_id(masks)
399
+
400
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
401
+ masks = masks == ids[:, None, None]
402
+ masks = masks.astype(np.uint8)
403
+ if return_masks:
404
+ new_target["masks"] = masks
405
+ new_target["boxes"] = masks_to_boxes(masks)
406
+ new_target["class_labels"] = np.array(
407
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
408
+ )
409
+ new_target["iscrowd"] = np.asarray(
410
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
411
+ )
412
+ new_target["area"] = np.asarray(
413
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
414
+ )
415
+
416
+ return new_target
417
+
418
+
419
+ # Copied from transformers.models.detr.image_processing_detr.resize_annotation
420
+ def resize_annotation(
421
+ annotation: Dict[str, Any],
422
+ orig_size: Tuple[int, int],
423
+ target_size: Tuple[int, int],
424
+ threshold: float = 0.5,
425
+ resample: PILImageResampling = PILImageResampling.NEAREST,
426
+ ):
427
+ """
428
+ Resizes an annotation to a target size.
429
+
430
+ Args:
431
+ annotation (`Dict[str, Any]`):
432
+ The annotation dictionary.
433
+ orig_size (`Tuple[int, int]`):
434
+ The original size of the input image.
435
+ target_size (`Tuple[int, int]`):
436
+ The target size of the image, as returned by the preprocessing `resize` step.
437
+ threshold (`float`, *optional*, defaults to 0.5):
438
+ The threshold used to binarize the segmentation masks.
439
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
440
+ The resampling filter to use when resizing the masks.
441
+ """
442
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
443
+ ratio_height, ratio_width = ratios
444
+
445
+ new_annotation = {}
446
+ new_annotation["size"] = target_size
447
+
448
+ for key, value in annotation.items():
449
+ if key == "boxes":
450
+ boxes = value
451
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
452
+ new_annotation["boxes"] = scaled_boxes
453
+ elif key == "area":
454
+ area = value
455
+ scaled_area = area * (ratio_width * ratio_height)
456
+ new_annotation["area"] = scaled_area
457
+ elif key == "masks":
458
+ masks = value[:, None]
459
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
460
+ masks = masks.astype(np.float32)
461
+ masks = masks[:, 0] > threshold
462
+ new_annotation["masks"] = masks
463
+ elif key == "size":
464
+ new_annotation["size"] = target_size
465
+ else:
466
+ new_annotation[key] = value
467
+
468
+ return new_annotation
469
+
470
+
471
+ class DetaImageProcessor(BaseImageProcessor):
472
+ r"""
473
+ Constructs a Deformable DETR image processor.
474
+
475
+ Args:
476
+ format (`str`, *optional*, defaults to `"coco_detection"`):
477
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
478
+ do_resize (`bool`, *optional*, defaults to `True`):
479
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
480
+ overridden by the `do_resize` parameter in the `preprocess` method.
481
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
482
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
483
+ the `preprocess` method.
484
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
485
+ Resampling filter to use if resizing the image.
486
+ do_rescale (`bool`, *optional*, defaults to `True`):
487
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
488
+ `do_rescale` parameter in the `preprocess` method.
489
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
490
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
491
+ `preprocess` method.
492
+ do_normalize:
493
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
494
+ `preprocess` method.
495
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
496
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
497
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
498
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
499
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
500
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
501
+ do_pad (`bool`, *optional*, defaults to `True`):
502
+ Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be
503
+ overridden by the `do_pad` parameter in the `preprocess` method.
504
+ """
505
+
506
+ model_input_names = ["pixel_values", "pixel_mask"]
507
+
508
+ def __init__(
509
+ self,
510
+ format: Union[str, AnnotionFormat] = AnnotionFormat.COCO_DETECTION,
511
+ do_resize: bool = True,
512
+ size: Dict[str, int] = None,
513
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
514
+ do_rescale: bool = True,
515
+ rescale_factor: Union[int, float] = 1 / 255,
516
+ do_normalize: bool = True,
517
+ image_mean: Union[float, List[float]] = None,
518
+ image_std: Union[float, List[float]] = None,
519
+ do_pad: bool = True,
520
+ **kwargs,
521
+ ) -> None:
522
+ if "pad_and_return_pixel_mask" in kwargs:
523
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
524
+
525
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
526
+ size = get_size_dict(size, default_to_square=False)
527
+
528
+ super().__init__(**kwargs)
529
+ self.format = format
530
+ self.do_resize = do_resize
531
+ self.size = size
532
+ self.resample = resample
533
+ self.do_rescale = do_rescale
534
+ self.rescale_factor = rescale_factor
535
+ self.do_normalize = do_normalize
536
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
537
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
538
+ self.do_pad = do_pad
539
+
540
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DETA
541
+ def prepare_annotation(
542
+ self,
543
+ image: np.ndarray,
544
+ target: Dict,
545
+ format: Optional[AnnotionFormat] = None,
546
+ return_segmentation_masks: bool = None,
547
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
548
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
549
+ ) -> Dict:
550
+ """
551
+ Prepare an annotation for feeding into DETA model.
552
+ """
553
+ format = format if format is not None else self.format
554
+
555
+ if format == AnnotionFormat.COCO_DETECTION:
556
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
557
+ target = prepare_coco_detection_annotation(
558
+ image, target, return_segmentation_masks, input_data_format=input_data_format
559
+ )
560
+ elif format == AnnotionFormat.COCO_PANOPTIC:
561
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
562
+ target = prepare_coco_panoptic_annotation(
563
+ image,
564
+ target,
565
+ masks_path=masks_path,
566
+ return_masks=return_segmentation_masks,
567
+ input_data_format=input_data_format,
568
+ )
569
+ else:
570
+ raise ValueError(f"Format {format} is not supported.")
571
+ return target
572
+
573
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
574
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
575
+ logger.warning_once(
576
+ "The `prepare` method is deprecated and will be removed in a v4.33. "
577
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
578
+ "does not return the image anymore.",
579
+ )
580
+ target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
581
+ return image, target
582
+
583
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
584
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
585
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
586
+ return convert_coco_poly_to_mask(*args, **kwargs)
587
+
588
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection
589
+ def prepare_coco_detection(self, *args, **kwargs):
590
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
591
+ return prepare_coco_detection_annotation(*args, **kwargs)
592
+
593
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
594
+ def prepare_coco_panoptic(self, *args, **kwargs):
595
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
596
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
597
+
598
+ def resize(
599
+ self,
600
+ image: np.ndarray,
601
+ size: Dict[str, int],
602
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
603
+ data_format: Optional[ChannelDimension] = None,
604
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
605
+ **kwargs,
606
+ ) -> np.ndarray:
607
+ """
608
+ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
609
+ int, smaller edge of the image will be matched to this number.
610
+
611
+ Args:
612
+ image (`np.ndarray`):
613
+ Image to resize.
614
+ size (`Dict[str, int]`):
615
+ The desired output size. Can contain keys `shortest_edge` and `longest_edge` or `height` and `width`.
616
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
617
+ Resampling filter to use if resizing the image.
618
+ data_format (`ChannelDimension`, *optional*):
619
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
620
+ image is used.
621
+ input_data_format (`ChannelDimension` or `str`, *optional*):
622
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
623
+ image.
624
+ """
625
+ size = get_size_dict(size, default_to_square=False)
626
+ if "shortest_edge" in size and "longest_edge" in size:
627
+ size = get_resize_output_image_size(
628
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
629
+ )
630
+ elif "height" in size and "width" in size:
631
+ size = (size["height"], size["width"])
632
+ else:
633
+ raise ValueError(
634
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
635
+ f" {size.keys()}."
636
+ )
637
+ image = resize(
638
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format
639
+ )
640
+ return image
641
+
642
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
643
+ def resize_annotation(
644
+ self,
645
+ annotation,
646
+ orig_size,
647
+ size,
648
+ resample: PILImageResampling = PILImageResampling.NEAREST,
649
+ ) -> Dict:
650
+ """
651
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
652
+ to this number.
653
+ """
654
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
655
+
656
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
657
+ def rescale(
658
+ self,
659
+ image: np.ndarray,
660
+ rescale_factor: float,
661
+ data_format: Optional[Union[str, ChannelDimension]] = None,
662
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
663
+ ) -> np.ndarray:
664
+ """
665
+ Rescale the image by the given factor. image = image * rescale_factor.
666
+
667
+ Args:
668
+ image (`np.ndarray`):
669
+ Image to rescale.
670
+ rescale_factor (`float`):
671
+ The value to use for rescaling.
672
+ data_format (`str` or `ChannelDimension`, *optional*):
673
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
674
+ image is used. Can be one of:
675
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
676
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
677
+ input_data_format (`str` or `ChannelDimension`, *optional*):
678
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
679
+ one of:
680
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
681
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
682
+ """
683
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
684
+
685
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
686
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
687
+ """
688
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
689
+ `[center_x, center_y, width, height]` format.
690
+ """
691
+ return normalize_annotation(annotation, image_size=image_size)
692
+
693
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
694
+ def _pad_image(
695
+ self,
696
+ image: np.ndarray,
697
+ output_size: Tuple[int, int],
698
+ constant_values: Union[float, Iterable[float]] = 0,
699
+ data_format: Optional[ChannelDimension] = None,
700
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
701
+ ) -> np.ndarray:
702
+ """
703
+ Pad an image with zeros to the given size.
704
+ """
705
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
706
+ output_height, output_width = output_size
707
+
708
+ pad_bottom = output_height - input_height
709
+ pad_right = output_width - input_width
710
+ padding = ((0, pad_bottom), (0, pad_right))
711
+ padded_image = pad(
712
+ image,
713
+ padding,
714
+ mode=PaddingMode.CONSTANT,
715
+ constant_values=constant_values,
716
+ data_format=data_format,
717
+ input_data_format=input_data_format,
718
+ )
719
+ return padded_image
720
+
721
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
722
+ def pad(
723
+ self,
724
+ images: List[np.ndarray],
725
+ constant_values: Union[float, Iterable[float]] = 0,
726
+ return_pixel_mask: bool = True,
727
+ return_tensors: Optional[Union[str, TensorType]] = None,
728
+ data_format: Optional[ChannelDimension] = None,
729
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
730
+ ) -> BatchFeature:
731
+ """
732
+ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
733
+ in the batch and optionally returns their corresponding pixel mask.
734
+
735
+ Args:
736
+ image (`np.ndarray`):
737
+ Image to pad.
738
+ constant_values (`float` or `Iterable[float]`, *optional*):
739
+ The value to use for the padding if `mode` is `"constant"`.
740
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
741
+ Whether to return a pixel mask.
742
+ return_tensors (`str` or `TensorType`, *optional*):
743
+ The type of tensors to return. Can be one of:
744
+ - Unset: Return a list of `np.ndarray`.
745
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
746
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
747
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
748
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
749
+ data_format (`str` or `ChannelDimension`, *optional*):
750
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
751
+ input_data_format (`ChannelDimension` or `str`, *optional*):
752
+ The channel dimension format of the input image. If not provided, it will be inferred.
753
+ """
754
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
755
+
756
+ padded_images = [
757
+ self._pad_image(
758
+ image,
759
+ pad_size,
760
+ constant_values=constant_values,
761
+ data_format=data_format,
762
+ input_data_format=input_data_format,
763
+ )
764
+ for image in images
765
+ ]
766
+ data = {"pixel_values": padded_images}
767
+
768
+ if return_pixel_mask:
769
+ masks = [
770
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
771
+ for image in images
772
+ ]
773
+ data["pixel_mask"] = masks
774
+
775
+ return BatchFeature(data=data, tensor_type=return_tensors)
776
+
777
+ def preprocess(
778
+ self,
779
+ images: ImageInput,
780
+ annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None,
781
+ return_segmentation_masks: bool = None,
782
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
783
+ do_resize: Optional[bool] = None,
784
+ size: Optional[Dict[str, int]] = None,
785
+ resample=None, # PILImageResampling
786
+ do_rescale: Optional[bool] = None,
787
+ rescale_factor: Optional[Union[int, float]] = None,
788
+ do_normalize: Optional[bool] = None,
789
+ image_mean: Optional[Union[float, List[float]]] = None,
790
+ image_std: Optional[Union[float, List[float]]] = None,
791
+ do_pad: Optional[bool] = None,
792
+ format: Optional[Union[str, AnnotionFormat]] = None,
793
+ return_tensors: Optional[Union[TensorType, str]] = None,
794
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
795
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
796
+ **kwargs,
797
+ ) -> BatchFeature:
798
+ """
799
+ Preprocess an image or a batch of images so that it can be used by the model.
800
+
801
+ Args:
802
+ images (`ImageInput`):
803
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
804
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
805
+ annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):
806
+ List of annotations associated with the image or batch of images. If annotionation is for object
807
+ detection, the annotations should be a dictionary with the following keys:
808
+ - "image_id" (`int`): The image id.
809
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
810
+ dictionary. An image can have no annotations, in which case the list should be empty.
811
+ If annotionation is for segmentation, the annotations should be a dictionary with the following keys:
812
+ - "image_id" (`int`): The image id.
813
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
814
+ An image can have no segments, in which case the list should be empty.
815
+ - "file_name" (`str`): The file name of the image.
816
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
817
+ Whether to return segmentation masks.
818
+ masks_path (`str` or `pathlib.Path`, *optional*):
819
+ Path to the directory containing the segmentation masks.
820
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
821
+ Whether to resize the image.
822
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
823
+ Size of the image after resizing.
824
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
825
+ Resampling filter to use when resizing the image.
826
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
827
+ Whether to rescale the image.
828
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
829
+ Rescale factor to use when rescaling the image.
830
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
831
+ Whether to normalize the image.
832
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
833
+ Mean to use when normalizing the image.
834
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
835
+ Standard deviation to use when normalizing the image.
836
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
837
+ Whether to pad the image.
838
+ format (`str` or `AnnotionFormat`, *optional*, defaults to self.format):
839
+ Format of the annotations.
840
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
841
+ Type of tensors to return. If `None`, will return the list of images.
842
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
843
+ The channel dimension format for the output image. Can be one of:
844
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
845
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
846
+ - Unset: Use the channel dimension format of the input image.
847
+ input_data_format (`ChannelDimension` or `str`, *optional*):
848
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
849
+ from the input image. Can be one of:
850
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
851
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
852
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
853
+ """
854
+ if "pad_and_return_pixel_mask" in kwargs:
855
+ logger.warning_once(
856
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
857
+ "use `do_pad` instead.",
858
+ )
859
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
860
+
861
+ do_resize = self.do_resize if do_resize is None else do_resize
862
+ size = self.size if size is None else size
863
+ size = get_size_dict(size=size, default_to_square=False)
864
+ resample = self.resample if resample is None else resample
865
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
866
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
867
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
868
+ image_mean = self.image_mean if image_mean is None else image_mean
869
+ image_std = self.image_std if image_std is None else image_std
870
+ do_pad = self.do_pad if do_pad is None else do_pad
871
+ format = self.format if format is None else format
872
+
873
+ if do_resize is not None and size is None:
874
+ raise ValueError("Size and max_size must be specified if do_resize is True.")
875
+
876
+ if do_rescale is not None and rescale_factor is None:
877
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
878
+
879
+ if do_normalize is not None and (image_mean is None or image_std is None):
880
+ raise ValueError("Image mean and std must be specified if do_normalize is True.")
881
+
882
+ if not is_batched(images):
883
+ images = [images]
884
+ annotations = [annotations] if annotations is not None else None
885
+
886
+ if annotations is not None and len(images) != len(annotations):
887
+ raise ValueError(
888
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
889
+ )
890
+
891
+ if not valid_images(images):
892
+ raise ValueError(
893
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
894
+ "torch.Tensor, tf.Tensor or jax.ndarray."
895
+ )
896
+
897
+ format = AnnotionFormat(format)
898
+ if annotations is not None:
899
+ if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations):
900
+ raise ValueError(
901
+ "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts "
902
+ "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
903
+ "being a list of annotations in the COCO format."
904
+ )
905
+ elif format == AnnotionFormat.COCO_PANOPTIC and not valid_coco_panoptic_annotations(annotations):
906
+ raise ValueError(
907
+ "Invalid COCO panoptic annotations. Annotations must a dict (single image) of list of dicts "
908
+ "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with "
909
+ "the latter being a list of annotations in the COCO format."
910
+ )
911
+ elif format not in SUPPORTED_ANNOTATION_FORMATS:
912
+ raise ValueError(
913
+ f"Unsupported annotation format: {format} must be one of {SUPPORTED_ANNOTATION_FORMATS}"
914
+ )
915
+
916
+ if (
917
+ masks_path is not None
918
+ and format == AnnotionFormat.COCO_PANOPTIC
919
+ and not isinstance(masks_path, (pathlib.Path, str))
920
+ ):
921
+ raise ValueError(
922
+ "The path to the directory containing the mask PNG files should be provided as a"
923
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
924
+ )
925
+
926
+ # All transformations expect numpy arrays
927
+ images = [to_numpy_array(image) for image in images]
928
+
929
+ if is_scaled_image(images[0]) and do_rescale:
930
+ logger.warning_once(
931
+ "It looks like you are trying to rescale already rescaled images. If the input"
932
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
933
+ )
934
+
935
+ if input_data_format is None:
936
+ # We assume that all images have the same channel dimension format.
937
+ input_data_format = infer_channel_dimension_format(images[0])
938
+
939
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
940
+ if annotations is not None:
941
+ prepared_images = []
942
+ prepared_annotations = []
943
+ for image, target in zip(images, annotations):
944
+ target = self.prepare_annotation(
945
+ image,
946
+ target,
947
+ format,
948
+ return_segmentation_masks=return_segmentation_masks,
949
+ masks_path=masks_path,
950
+ input_data_format=input_data_format,
951
+ )
952
+ prepared_images.append(image)
953
+ prepared_annotations.append(target)
954
+ images = prepared_images
955
+ annotations = prepared_annotations
956
+ del prepared_images, prepared_annotations
957
+
958
+ # transformations
959
+ if do_resize:
960
+ if annotations is not None:
961
+ resized_images, resized_annotations = [], []
962
+ for image, target in zip(images, annotations):
963
+ orig_size = get_image_size(image, input_data_format)
964
+ resized_image = self.resize(
965
+ image, size=size, resample=resample, input_data_format=input_data_format
966
+ )
967
+ resized_annotation = self.resize_annotation(
968
+ target, orig_size, get_image_size(resized_image, input_data_format)
969
+ )
970
+ resized_images.append(resized_image)
971
+ resized_annotations.append(resized_annotation)
972
+ images = resized_images
973
+ annotations = resized_annotations
974
+ del resized_images, resized_annotations
975
+ else:
976
+ images = [
977
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
978
+ for image in images
979
+ ]
980
+
981
+ if do_rescale:
982
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
983
+
984
+ if do_normalize:
985
+ images = [
986
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
987
+ ]
988
+ if annotations is not None:
989
+ annotations = [
990
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
991
+ for annotation, image in zip(annotations, images)
992
+ ]
993
+
994
+ if do_pad:
995
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
996
+ data = self.pad(
997
+ images, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format
998
+ )
999
+ else:
1000
+ images = [
1001
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
1002
+ for image in images
1003
+ ]
1004
+ data = {"pixel_values": images}
1005
+
1006
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
1007
+ if annotations is not None:
1008
+ encoded_inputs["labels"] = [
1009
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
1010
+ ]
1011
+
1012
+ return encoded_inputs
1013
+
1014
+ def post_process_object_detection(
1015
+ self,
1016
+ outputs,
1017
+ threshold: float = 0.5,
1018
+ target_sizes: Union[TensorType, List[Tuple]] = None,
1019
+ nms_threshold: float = 0.7,
1020
+ ):
1021
+ """
1022
+ Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
1023
+ bottom_right_x, bottom_right_y) format. Only supports PyTorch.
1024
+
1025
+ Args:
1026
+ outputs ([`DetrObjectDetectionOutput`]):
1027
+ Raw outputs of the model.
1028
+ threshold (`float`, *optional*, defaults to 0.5):
1029
+ Score threshold to keep object detection predictions.
1030
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
1031
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
1032
+ (height, width) of each image in the batch. If left to None, predictions will not be resized.
1033
+ nms_threshold (`float`, *optional*, defaults to 0.7):
1034
+ NMS threshold.
1035
+
1036
+ Returns:
1037
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1038
+ in the batch as predicted by the model.
1039
+ """
1040
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
1041
+ batch_size, num_queries, num_labels = out_logits.shape
1042
+
1043
+ if target_sizes is not None:
1044
+ if len(out_logits) != len(target_sizes):
1045
+ raise ValueError(
1046
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1047
+ )
1048
+
1049
+ prob = out_logits.sigmoid()
1050
+
1051
+ all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device)
1052
+ all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device)
1053
+ all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode="floor")
1054
+ all_labels = all_indexes % out_logits.shape[2]
1055
+
1056
+ boxes = center_to_corners_format(out_bbox)
1057
+ boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4))
1058
+
1059
+ # and from relative [0, 1] to absolute [0, height] coordinates
1060
+ if target_sizes is not None:
1061
+ if isinstance(target_sizes, List):
1062
+ img_h = torch.Tensor([i[0] for i in target_sizes])
1063
+ img_w = torch.Tensor([i[1] for i in target_sizes])
1064
+ else:
1065
+ img_h, img_w = target_sizes.unbind(1)
1066
+
1067
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1068
+ boxes = boxes * scale_fct[:, None, :]
1069
+
1070
+ results = []
1071
+ for b in range(batch_size):
1072
+ box = boxes[b]
1073
+ score = all_scores[b]
1074
+ lbls = all_labels[b]
1075
+
1076
+ pre_topk = score.topk(min(10000, len(score))).indices
1077
+ box = box[pre_topk]
1078
+ score = score[pre_topk]
1079
+ lbls = lbls[pre_topk]
1080
+
1081
+ # apply NMS
1082
+ keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100]
1083
+ score = score[keep_inds]
1084
+ lbls = lbls[keep_inds]
1085
+ box = box[keep_inds]
1086
+
1087
+ results.append(
1088
+ {
1089
+ "scores": score[score > threshold],
1090
+ "labels": lbls[score > threshold],
1091
+ "boxes": box[score > threshold],
1092
+ }
1093
+ )
1094
+
1095
+ return results
mgm/lib/python3.10/site-packages/transformers/models/deta/modeling_deta.py ADDED
The diff for this file is too large to render. See raw diff
 
mgm/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/modeling_imagegpt.cpython-310.pyc ADDED
Binary file (34.4 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/imagegpt/feature_extraction_imagegpt.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for ImageGPT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_imagegpt import ImageGPTImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    """Deprecated alias kept for backwards compatibility; use `ImageGPTImageProcessor`."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation warning before delegating to the real processor.
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
mgm/lib/python3.10/site-packages/transformers/models/imagegpt/image_processing_imagegpt.py ADDED
@@ -0,0 +1,293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for ImageGPT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import rescale, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ ChannelDimension,
25
+ ImageInput,
26
+ PILImageResampling,
27
+ infer_channel_dimension_format,
28
+ is_scaled_image,
29
+ make_list_of_images,
30
+ to_numpy_array,
31
+ valid_images,
32
+ )
33
+ from ...utils import TensorType, is_vision_available, logging
34
+
35
+
36
+ if is_vision_available():
37
+ import PIL
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
def squared_euclidean_distance(a, b):
    """
    Pairwise squared Euclidean distances between the rows of `a` (shape `(m, k)`) and
    the rows of `b` (shape `(n, k)`), returned as an `(m, n)` array.

    Uses the expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2 so the full
    difference tensor is never materialised.
    """
    b_t = b.T
    a_sq = np.sum(np.square(a), axis=1)[:, None]
    b_sq = np.sum(np.square(b_t), axis=0)[None, :]
    return a_sq - 2 * np.matmul(a, b_t) + b_sq
50
+
51
+
52
def color_quantize(x, clusters):
    """
    Map every RGB pixel in `x` to the index of its nearest centroid in `clusters`
    (shape `(n_clusters, 3)`). Returns a flat array of cluster indices, one per pixel.
    """
    pixels = x.reshape(-1, 3)
    # Nearest centroid via the expanded squared-distance formula
    # ||p - c||^2 = ||p||^2 - 2*p.c + ||c||^2 (inlined for self-containment).
    distances = (
        np.sum(np.square(pixels), axis=1)[:, None]
        - 2 * np.matmul(pixels, clusters.T)
        + np.sum(np.square(clusters), axis=1)[None, :]
    )
    return np.argmin(distances, axis=1)
56
+
57
+
58
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a ImageGPT image processor. This image processor can be used to resize images to a smaller resolution
    (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values"
    (color clusters).

    Args:
        clusters (`np.ndarray` or `List[List[int]]`, *optional*):
            The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overriden by `clusters`
            in `preprocess`.
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by
            `do_resize` in `preprocess`.
        size (`Dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`):
            Size of the image after resizing. Can be overridden by `size` in `preprocess`.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image pixel value to between [-1, 1]. Can be overridden by `do_normalize` in
            `preprocess`.
        do_color_quantize (`bool`, *optional*, defaults to `True`):
            Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        # clusters is a first argument to maintain backwards compatibility with the old ImageGPTImageProcessor
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
        Normalizes an images' pixel values to between [-1, 1].

        Args:
            image (`np.ndarray`):
                Image to normalize.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        # [0, 255] / 127.5 -> [0, 2], then shift down by 1 -> [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_normalize=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
                has an effect if `do_resize` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image
            do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`):
                Whether to color quantize the image.
            clusters (`np.ndarray` or `List[List[int]]`, *optional*, defaults to `self.clusters`):
                Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if
                `do_color_quantize` is set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                Only has an effect if `do_color_quantize` is set to `False`.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # Fix: only convert when set — `np.array(None)` is a 0-d object array (not None),
        # which used to make the `clusters is None` guard below unreachable.
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Fix: parenthesized — previously `do_resize and size is None or resample is None`
        # raised whenever `resample` was None, even with `do_resize=False`.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_normalize:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If you wish to do this, "
                "make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].",
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_resize:
            images = [
                self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [
                to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
                for image in images
            ]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__init__.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy import structure for the LayoutLMv3 model family.

At runtime this module is replaced by a ``_LazyModule`` (see the final ``sys.modules``
assignment), so optional backends (tokenizers, PyTorch, TensorFlow, vision) are only
imported when one of their objects is actually accessed. Under ``TYPE_CHECKING`` the
same objects are imported eagerly so static analyzers can resolve them.
"""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Base objects that do not depend on any optional backend.
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

# Fast tokenizer: only available when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

# Image processing: requires the vision extras (PIL, ...).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors `_import_structure` above.
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    # Replace this module with a lazy proxy so the optional backends above are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.22 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/configuration_layoutlmv3.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/feature_extraction_layoutlmv3.cpython-310.pyc ADDED
Binary file (1.03 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/image_processing_layoutlmv3.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_layoutlmv3.cpython-310.pyc ADDED
Binary file (42.2 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_tf_layoutlmv3.cpython-310.pyc ADDED
Binary file (45.3 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/processing_layoutlmv3.cpython-310.pyc ADDED
Binary file (7.21 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3.cpython-310.pyc ADDED
Binary file (47.7 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3_fast.cpython-310.pyc ADDED
Binary file (22.6 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/configuration_layoutlmv3.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LayoutLMv3 model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import TYPE_CHECKING, Any, Mapping, Optional
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...onnx.utils import compute_effective_axis_dimension
25
+ from ...utils import logging
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ from ...processing_utils import ProcessorMixin
30
+ from ...utils import TensorType
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
36
+ "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
37
+ }
38
+
39
+
40
class LayoutLMv3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LayoutLMv3Model`]. It is used to instantiate an
    LayoutLMv3 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LayoutLMv3
    [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the LayoutLMv3 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LayoutLMv3Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv3Model`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end-of-sequence token.
        max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum value that the 2D position embedding might ever be used with. Typically set this to something
            large just in case (e.g., 1024).
        coordinate_size (`int`, *optional*, defaults to `128`):
            Dimension of the coordinate embeddings.
        shape_size (`int`, *optional*, defaults to `128`):
            Dimension of the width and height embeddings.
        has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a relative attention bias in the self-attention mechanism.
        rel_pos_bins (`int`, *optional*, defaults to 32):
            The number of relative position bins to be used in the self-attention mechanism.
        max_rel_pos (`int`, *optional*, defaults to 128):
            The maximum number of relative positions to be used in the self-attention mechanism.
        max_rel_2d_pos (`int`, *optional*, defaults to 256):
            The maximum number of relative 2D positions in the self-attention mechanism.
        rel_2d_pos_bins (`int`, *optional*, defaults to 64):
            The number of 2D relative position bins in the self-attention mechanism.
        has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a spatial attention bias in the self-attention mechanism.
        text_embed (`bool`, *optional*, defaults to `True`):
            Whether or not to add text embeddings.
        visual_embed (`bool`, *optional*, defaults to `True`):
            Whether or not to add patch embeddings.
        input_size (`int`, *optional*, defaults to `224`):
            The size (resolution) of the images.
        num_channels (`int`, *optional*, defaults to `3`):
            The number of channels of the images.
        patch_size (`int`, *optional*, defaults to `16`):
            The size (resolution) of the patches.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Example:

    ```python
    >>> from transformers import LayoutLMv3Config, LayoutLMv3Model

    >>> # Initializing a LayoutLMv3 microsoft/layoutlmv3-base style configuration
    >>> configuration = LayoutLMv3Config()

    >>> # Initializing a model (with random weights) from the microsoft/layoutlmv3-base style configuration
    >>> model = LayoutLMv3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        # Text-transformer hyperparameters are handled by the base class.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # LayoutLMv3-specific (layout + vision) hyperparameters.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
192
+
193
+
194
class LayoutLMv3OnnxConfig(OnnxConfig):
    # Minimum torch version required to export this architecture to ONNX.
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Ordered mapping of model input names to their dynamic axes for ONNX export."""
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            # NOTE(review): only the channel axis of `pixel_values` is dynamic here, unlike the
            # branch above — presumably intentional per task; confirm against the exporter.
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against the reference model.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """
        Generate inputs to provide to the ONNX exporter for the specific framework

        Args:
            processor ([`ProcessorMixin`]):
                The processor associated with this model configuration.
            batch_size (`int`, *optional*, defaults to -1):
                The batch size to export the model for (-1 means dynamic axis).
            seq_length (`int`, *optional*, defaults to -1):
                The sequence length to export the model for (-1 means dynamic axis).
            is_pair (`bool`, *optional*, defaults to `False`):
                Indicate if the input is a pair (sentence 1, sentence 2).
            framework (`TensorType`, *optional*, defaults to `None`):
                The framework (PyTorch or TensorFlow) that the processor will generate tensors for.
            num_channels (`int`, *optional*, defaults to 3):
                The number of channels of the generated images.
            image_width (`int`, *optional*, defaults to 40):
                The width of the generated images.
            image_height (`int`, *optional*, defaults to 40):
                The height of the generated images.

        Returns:
            Mapping[str, Any]: holding the kwargs to provide to the model's forward function
        """

        # A dummy image is used so OCR should not be applied
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        # NOTE(review): `" ".join([unk_token])` is just `unk_token`, so `* seq_length` repeats the
        # token without separating spaces; this matches the construction used here but may yield a
        # different tokenized length than `seq_length` — confirm intended.
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/image_processing_layoutlmv3.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for LayoutLMv3."""
16
+
17
+ from typing import Dict, Iterable, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import resize, to_channel_dimension_format, to_pil_image
23
+ from ...image_utils import (
24
+ IMAGENET_STANDARD_MEAN,
25
+ IMAGENET_STANDARD_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ infer_channel_dimension_format,
30
+ is_scaled_image,
31
+ make_list_of_images,
32
+ to_numpy_array,
33
+ valid_images,
34
+ )
35
+ from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
36
+
37
+
38
+ if is_vision_available():
39
+ import PIL
40
+
41
+ # soft dependency
42
+ if is_pytesseract_available():
43
+ import pytesseract
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
def normalize_box(box, width, height):
    """Rescale an absolute `(left, top, right, bottom)` box to the 0-1000 range used by LayoutLM-style models."""
    # x-coordinates are scaled by the image width, y-coordinates by the image height.
    extents = (width, height, width, height)
    return [int(1000 * (box[i] / extents[i])) for i in range(4)]
55
+
56
+
57
def apply_tesseract(
    image: np.ndarray,
    lang: Optional[str],
    tesseract_config: Optional[str],
    input_data_format: Optional[Union[ChannelDimension, str]] = None,
):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.

    Args:
        image (`np.ndarray`):
            The document image to run OCR on.
        lang (`str`, *optional*):
            ISO language code forwarded to Tesseract.
        tesseract_config (`str`, *optional*):
            Extra flags forwarded to Tesseract's `config` parameter.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            Channel dimension format of `image`; inferred if not provided.

    Returns:
        A `(words, normalized_boxes)` pair: the non-empty recognized words and their
        `(left, top, right, bottom)` boxes normalized to a 0-1000 scale.
    """

    # apply OCR; Tesseract reports per-word boxes as (left, top, width, height)
    pil_image = to_pil_image(image, input_data_format=input_data_format)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)

    # Single linear pass: skip empty/whitespace-only words and, for the rest, build
    # (left, top, left + width, top + height) boxes and normalize them. This replaces the
    # previous quadratic filtering (per-element `idx not in irrelevant_indices` list lookups).
    words = []
    normalized_boxes = []
    for word, x, y, w, h in zip(data["text"], data["left"], data["top"], data["width"], data["height"]):
        if not word.strip():
            continue
        words.append(word)
        normalized_boxes.append(normalize_box([x, y, x + w, y + h], image_width, image_height))

    # words and normalized_boxes are appended together, so they are always the same length.
    return words, normalized_boxes
93
+
94
+
95
+ class LayoutLMv3ImageProcessor(BaseImageProcessor):
96
+ r"""
97
+ Constructs a LayoutLMv3 image processor.
98
+
99
+ Args:
100
+ do_resize (`bool`, *optional*, defaults to `True`):
101
+ Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. Can be
102
+ overridden by `do_resize` in `preprocess`.
103
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
104
+ Size of the image after resizing. Can be overridden by `size` in `preprocess`.
105
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
106
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
107
+ do_rescale (`bool`, *optional*, defaults to `True`):
108
+ Whether to rescale the image's pixel values by the specified `rescale_value`. Can be overridden by
109
+ `do_rescale` in `preprocess`.
110
+ rescale_factor (`float`, *optional*, defaults to 1 / 255):
111
+ Value by which the image's pixel values are rescaled. Can be overridden by `rescale_factor` in
112
+ `preprocess`.
113
+ do_normalize (`bool`, *optional*, defaults to `True`):
114
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
115
+ method.
116
+ image_mean (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
117
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
118
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
119
+ image_std (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
120
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
121
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
122
+ apply_ocr (`bool`, *optional*, defaults to `True`):
123
+ Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by
124
+ the `apply_ocr` parameter in the `preprocess` method.
125
+ ocr_lang (`str`, *optional*):
126
+ The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
127
+ used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.
128
+ tesseract_config (`str`, *optional*):
129
+ Any additional custom configuration flags that are forwarded to the `config` parameter when calling
130
+ Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the
131
+ `preprocess` method.
132
+ """
133
+
134
+ model_input_names = ["pixel_values"]
135
+
136
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        """Store the preprocessing flags; see the class docstring for the meaning of each one."""
        super().__init__(**kwargs)
        # Default target size is a square 224x224; `get_size_dict` normalizes shorthand forms
        # into the canonical {"height": ..., "width": ...} dictionary.
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        # NOTE(review): the constructor argument is named `rescale_value` but it is stored as
        # `rescale_factor` (the name used by `preprocess`) — mind the mismatch when passing by keyword.
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        # Fall back to the ImageNet normalization statistics when none are given.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
166
+
167
    # NOTE(review): the body below appears to be kept in sync with the ViT image processor by the
    # repository's "Copied from" consistency tooling — do not edit it independently of the original.
    # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
215
+
216
+ def preprocess(
217
+ self,
218
+ images: ImageInput,
219
+ do_resize: bool = None,
220
+ size: Dict[str, int] = None,
221
+ resample=None,
222
+ do_rescale: bool = None,
223
+ rescale_factor: float = None,
224
+ do_normalize: bool = None,
225
+ image_mean: Union[float, Iterable[float]] = None,
226
+ image_std: Union[float, Iterable[float]] = None,
227
+ apply_ocr: bool = None,
228
+ ocr_lang: Optional[str] = None,
229
+ tesseract_config: Optional[str] = None,
230
+ return_tensors: Optional[Union[str, TensorType]] = None,
231
+ data_format: ChannelDimension = ChannelDimension.FIRST,
232
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
233
+ **kwargs,
234
+ ) -> PIL.Image.Image:
235
+ """
236
+ Preprocess an image or batch of images.
237
+
238
+ Args:
239
+ images (`ImageInput`):
240
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
241
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
242
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
243
+ Whether to resize the image.
244
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
245
+ Desired size of the output image after applying `resize`.
246
+ resample (`int`, *optional*, defaults to `self.resample`):
247
+ Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters.
248
+ Only has an effect if `do_resize` is set to `True`.
249
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
250
+ Whether to rescale the image pixel values between [0, 1].
251
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
252
+ Rescale factor to apply to the image pixel values. Only has an effect if `do_rescale` is set to `True`.
253
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
254
+ Whether to normalize the image.
255
+ image_mean (`float` or `Iterable[float]`, *optional*, defaults to `self.image_mean`):
256
+ Mean values to be used for normalization. Only has an effect if `do_normalize` is set to `True`.
257
+ image_std (`float` or `Iterable[float]`, *optional*, defaults to `self.image_std`):
258
+ Standard deviation values to be used for normalization. Only has an effect if `do_normalize` is set to
259
+ `True`.
260
+ apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`):
261
+ Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.
262
+ ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`):
263
+ The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
264
+ used.
265
+ tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`):
266
+ Any additional custom configuration flags that are forwarded to the `config` parameter when calling
267
+ Tesseract.
268
+ return_tensors (`str` or `TensorType`, *optional*):
269
+ The type of tensors to return. Can be one of:
270
+ - Unset: Return a list of `np.ndarray`.
271
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
272
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
273
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
274
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
275
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
276
+ The channel dimension format for the output image. Can be one of:
277
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
278
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
279
+ input_data_format (`ChannelDimension` or `str`, *optional*):
280
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
281
+ from the input image. Can be one of:
282
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
283
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
284
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
285
+ """
286
+ do_resize = do_resize if do_resize is not None else self.do_resize
287
+ size = size if size is not None else self.size
288
+ size = get_size_dict(size)
289
+ resample = resample if resample is not None else self.resample
290
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
291
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
292
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
293
+ image_mean = image_mean if image_mean is not None else self.image_mean
294
+ image_std = image_std if image_std is not None else self.image_std
295
+ apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
296
+ ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
297
+ tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
298
+
299
+ images = make_list_of_images(images)
300
+
301
+ if not valid_images(images):
302
+ raise ValueError(
303
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
304
+ "torch.Tensor, tf.Tensor or jax.ndarray."
305
+ )
306
+
307
+ if do_resize and size is None:
308
+ raise ValueError("Size must be specified if do_resize is True.")
309
+
310
+ if do_rescale and rescale_factor is None:
311
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
312
+
313
+ if do_normalize and (image_mean is None or image_std is None):
314
+ raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
315
+
316
+ # All transformations expect numpy arrays.
317
+ images = [to_numpy_array(image) for image in images]
318
+
319
+ if is_scaled_image(images[0]) and do_rescale:
320
+ logger.warning_once(
321
+ "It looks like you are trying to rescale already rescaled images. If the input"
322
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
323
+ )
324
+
325
+ if input_data_format is None:
326
+ # We assume that all images have the same channel dimension format.
327
+ input_data_format = infer_channel_dimension_format(images[0])
328
+
329
+ # Tesseract OCR to get words + normalized bounding boxes
330
+ if apply_ocr:
331
+ requires_backends(self, "pytesseract")
332
+ words_batch = []
333
+ boxes_batch = []
334
+ for image in images:
335
+ words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)
336
+ words_batch.append(words)
337
+ boxes_batch.append(boxes)
338
+
339
+ if do_resize:
340
+ images = [
341
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
342
+ for image in images
343
+ ]
344
+
345
+ if do_rescale:
346
+ images = [
347
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
348
+ for image in images
349
+ ]
350
+
351
+ if do_normalize:
352
+ images = [
353
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
354
+ for image in images
355
+ ]
356
+
357
+ images = [
358
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
359
+ ]
360
+
361
+ data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
362
+
363
+ if apply_ocr:
364
+ data["words"] = words_batch
365
+ data["boxes"] = boxes_batch
366
+ return data
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/modeling_layoutlmv3.py ADDED
@@ -0,0 +1,1373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch LayoutLMv3 model."""
16
+
17
+ import collections
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ QuestionAnsweringModelOutput,
31
+ SequenceClassifierOutput,
32
+ TokenClassifierOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...pytorch_utils import apply_chunking_to_forward
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
37
+ from .configuration_layoutlmv3 import LayoutLMv3Config
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CONFIG_FOR_DOC = "LayoutLMv3Config"
43
+
44
+ LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = [
45
+ "microsoft/layoutlmv3-base",
46
+ "microsoft/layoutlmv3-large",
47
+ # See all LayoutLMv3 models at https://huggingface.co/models?filter=layoutlmv3
48
+ ]
49
+
50
+ LAYOUTLMV3_START_DOCSTRING = r"""
51
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
52
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
53
+ behavior.
54
+
55
+ Parameters:
56
+ config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model.
57
+ Initializing with a config file does not load the weights associated with the model, only the
58
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
59
+ """
60
+
61
+ LAYOUTLMV3_MODEL_INPUTS_DOCSTRING = r"""
62
+ Args:
63
+ input_ids (`torch.LongTensor` of shape `({0})`):
64
+ Indices of input sequence tokens in the vocabulary.
65
+
66
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
67
+ token. See `pixel_values` for `patch_sequence_length`.
68
+
69
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
70
+ [`PreTrainedTokenizer.__call__`] for details.
71
+
72
+ [What are input IDs?](../glossary#input-ids)
73
+
74
+ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
75
+ Bounding boxes of each input sequence tokens. Selected in the range `[0,
76
+ config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
77
+ format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
78
+ y1) represents the position of the lower right corner.
79
+
80
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
81
+ token. See `pixel_values` for `patch_sequence_length`.
82
+
83
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
84
+ Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
85
+ config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /
86
+ config.patch_size) * (width / config.patch_size))`.
87
+
88
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
89
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
90
+
91
+ - 1 for tokens that are **not masked**,
92
+ - 0 for tokens that are **masked**.
93
+
94
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
95
+ token. See `pixel_values` for `patch_sequence_length`.
96
+
97
+ [What are attention masks?](../glossary#attention-mask)
98
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
99
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
100
+ 1]`:
101
+
102
+ - 0 corresponds to a *sentence A* token,
103
+ - 1 corresponds to a *sentence B* token.
104
+
105
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
106
+ token. See `pixel_values` for `patch_sequence_length`.
107
+
108
+ [What are token type IDs?](../glossary#token-type-ids)
109
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
110
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
111
+ config.max_position_embeddings - 1]`.
112
+
113
+ Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
114
+ token. See `pixel_values` for `patch_sequence_length`.
115
+
116
+ [What are position IDs?](../glossary#position-ids)
117
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
118
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
119
+
120
+ - 1 indicates the head is **not masked**,
121
+ - 0 indicates the head is **masked**.
122
+
123
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
124
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
125
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
126
+ model's internal embedding lookup matrix.
127
+ output_attentions (`bool`, *optional*):
128
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
129
+ tensors for more detail.
130
+ output_hidden_states (`bool`, *optional*):
131
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
132
+ more detail.
133
+ return_dict (`bool`, *optional*):
134
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
135
+ """
136
+
137
+ LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING = r"""
138
+ Args:
139
+ input_ids (`torch.LongTensor` of shape `({0})`):
140
+ Indices of input sequence tokens in the vocabulary.
141
+
142
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
143
+ [`PreTrainedTokenizer.__call__`] for details.
144
+
145
+ [What are input IDs?](../glossary#input-ids)
146
+
147
+ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
148
+ Bounding boxes of each input sequence tokens. Selected in the range `[0,
149
+ config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
150
+ format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
151
+ y1) represents the position of the lower right corner.
152
+
153
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
154
+ Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
155
+ config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /
156
+ config.patch_size) * (width / config.patch_size))`.
157
+
158
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
159
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
160
+
161
+ - 1 for tokens that are **not masked**,
162
+ - 0 for tokens that are **masked**.
163
+
164
+ [What are attention masks?](../glossary#attention-mask)
165
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
166
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
167
+ 1]`:
168
+
169
+ - 0 corresponds to a *sentence A* token,
170
+ - 1 corresponds to a *sentence B* token.
171
+
172
+ [What are token type IDs?](../glossary#token-type-ids)
173
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
174
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
175
+ config.max_position_embeddings - 1]`.
176
+
177
+ [What are position IDs?](../glossary#position-ids)
178
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
179
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
180
+
181
+ - 1 indicates the head is **not masked**,
182
+ - 0 indicates the head is **masked**.
183
+
184
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
185
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
186
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
187
+ model's internal embedding lookup matrix.
188
+ output_attentions (`bool`, *optional*):
189
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
190
+ tensors for more detail.
191
+ output_hidden_states (`bool`, *optional*):
192
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
193
+ more detail.
194
+ return_dict (`bool`, *optional*):
195
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
196
+ """
197
+
198
+
199
class LayoutLMv3PatchEmbeddings(nn.Module):
    """
    Image (patch) embeddings for LayoutLMv3.

    Splits the input image into non-overlapping patches with a strided convolution and,
    when a 1D position embedding is supplied, bicubically interpolates it to match the
    actual patch grid so that images of varying size are supported.
    """

    def __init__(self, config):
        super().__init__()

        def _as_pair(value):
            # Accept either a single int or an (height, width) iterable.
            return value if isinstance(value, collections.abc.Iterable) else (value, value)

        image_size = _as_pair(config.input_size)
        patch_size = _as_pair(config.patch_size)
        # Patch grid implied by the configured image/patch sizes.
        self.patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
        self.proj = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values, position_embedding=None):
        # (batch, hidden, grid_h, grid_w) patch features.
        patches = self.proj(pixel_values)

        if position_embedding is not None:
            # Lay the flat position embedding out on the configured patch grid,
            # then resize it to the grid actually produced by this input.
            grid = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1)
            grid = grid.permute(0, 3, 1, 2)
            target_size = (patches.shape[2], patches.shape[3])
            grid = F.interpolate(grid, size=target_size, mode="bicubic")
            patches = patches + grid

        # Flatten the spatial grid into a sequence: (batch, num_patches, hidden).
        return patches.flatten(2).transpose(1, 2)
232
+
233
+
234
class LayoutLMv3TextEmbeddings(nn.Module):
    """
    Text embeddings for LayoutLMv3: the same word/token-type/1D-position embeddings as
    `RobertaEmbeddings`, plus spatial (2D layout) embeddings derived from bounding boxes.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # Pre-built (1, max_position_embeddings) index row; not saved in checkpoints.
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

        # Separate tables for x/y coordinates and for box height/width.
        self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)

    def calculate_spatial_position_embeddings(self, bbox):
        """Embed each (x0, y0, x1, y1) box as the concatenation of corner and size embeddings."""
        try:
            x0_emb = self.x_position_embeddings(bbox[:, :, 0])
            y0_emb = self.y_position_embeddings(bbox[:, :, 1])
            x1_emb = self.x_position_embeddings(bbox[:, :, 2])
            y1_emb = self.y_position_embeddings(bbox[:, :, 3])
        except IndexError as e:
            raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e

        # Box height/width, clipped so they index into the embedding tables.
        height_emb = self.h_position_embeddings(torch.clip(bbox[:, :, 3] - bbox[:, :, 1], 0, 1023))
        width_emb = self.w_position_embeddings(torch.clip(bbox[:, :, 2] - bbox[:, :, 0], 0, 1023))

        # Unlike LayoutLM(v1), which summed them, the six pieces are concatenated here.
        return torch.cat(
            [x0_emb, y0_emb, x1_emb, y1_emb, height_emb, width_emb],
            dim=-1,
        )

    def create_position_ids_from_input_ids(self, input_ids, padding_idx):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at
        padding_idx+1; padding symbols are ignored. Modified from fairseq's `utils.make_positions`.
        """
        # The cast/type-conversion sequence keeps this ONNX- and XLA-exportable.
        not_pad = input_ids.ne(padding_idx).int()
        running_positions = torch.cumsum(not_pad, dim=1).type_as(not_pad) * not_pad
        return running_positions.long() + padding_idx

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        Given only embeddings, padding cannot be inferred, so emit sequential position ids.
        """
        batch_shape = inputs_embeds.size()[:-1]
        seq_len = batch_shape[1]

        ids = torch.arange(
            self.padding_idx + 1, seq_len + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return ids.unsqueeze(0).expand(batch_shape)

    def forward(
        self,
        input_ids=None,
        bbox=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
    ):
        if position_ids is None:
            if input_ids is not None:
                # Derive position ids from the tokens; padded positions stay at padding_idx.
                position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(
                    input_ids.device
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        # Sum word, token-type, 1D-position and 2D-layout embeddings.
        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        embeddings = embeddings + self.position_embeddings(position_ids)
        embeddings = embeddings + self.calculate_spatial_position_embeddings(bbox)

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
350
+
351
+
352
class LayoutLMv3PreTrainedModel(PreTrainedModel):
    """
    Abstract base class handling weight initialization and providing a simple interface for
    downloading and loading pretrained LayoutLMv3 checkpoints.
    """

    config_class = LayoutLMv3Config
    base_model_prefix = "layoutlmv3"

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # The TF original uses truncated_normal; plain normal is used here
            # (cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding row at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
376
+
377
+
378
+ class LayoutLMv3SelfAttention(nn.Module):
379
+ def __init__(self, config):
380
+ super().__init__()
381
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
382
+ raise ValueError(
383
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
384
+ f"heads ({config.num_attention_heads})"
385
+ )
386
+
387
+ self.num_attention_heads = config.num_attention_heads
388
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
389
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
390
+
391
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
392
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
393
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
394
+
395
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
396
+ self.has_relative_attention_bias = config.has_relative_attention_bias
397
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
398
+
399
+ def transpose_for_scores(self, x):
400
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
401
+ x = x.view(*new_x_shape)
402
+ return x.permute(0, 2, 1, 3)
403
+
404
+ def cogview_attention(self, attention_scores, alpha=32):
405
+ """
406
+ https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation
407
+ (PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores). Seems the new attention_probs
408
+ will result in a slower speed and a little bias. Can use torch.allclose(standard_attention_probs,
409
+ cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better.
410
+ """
411
+ scaled_attention_scores = attention_scores / alpha
412
+ max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1)
413
+ new_attention_scores = (scaled_attention_scores - max_value) * alpha
414
+ return nn.Softmax(dim=-1)(new_attention_scores)
415
+
416
+ def forward(
417
+ self,
418
+ hidden_states,
419
+ attention_mask=None,
420
+ head_mask=None,
421
+ output_attentions=False,
422
+ rel_pos=None,
423
+ rel_2d_pos=None,
424
+ ):
425
+ mixed_query_layer = self.query(hidden_states)
426
+
427
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
428
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
429
+ query_layer = self.transpose_for_scores(mixed_query_layer)
430
+
431
+ # Take the dot product between "query" and "key" to get the raw attention scores.
432
+ # The attention scores QT K/√d could be significantly larger than input elements, and result in overflow.
433
+ # Changing the computational order into QT(K/√d) alleviates the problem. (https://arxiv.org/pdf/2105.13290.pdf)
434
+ attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
435
+
436
+ if self.has_relative_attention_bias and self.has_spatial_attention_bias:
437
+ attention_scores += (rel_pos + rel_2d_pos) / math.sqrt(self.attention_head_size)
438
+ elif self.has_relative_attention_bias:
439
+ attention_scores += rel_pos / math.sqrt(self.attention_head_size)
440
+
441
+ if attention_mask is not None:
442
+ # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
443
+ attention_scores = attention_scores + attention_mask
444
+
445
+ # Normalize the attention scores to probabilities.
446
+ # Use the trick of the CogView paper to stablize training
447
+ attention_probs = self.cogview_attention(attention_scores)
448
+
449
+ # This is actually dropping out entire tokens to attend to, which might
450
+ # seem a bit unusual, but is taken from the original Transformer paper.
451
+ attention_probs = self.dropout(attention_probs)
452
+
453
+ # Mask heads if we want to
454
+ if head_mask is not None:
455
+ attention_probs = attention_probs * head_mask
456
+
457
+ context_layer = torch.matmul(attention_probs, value_layer)
458
+
459
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
460
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
461
+ context_layer = context_layer.view(*new_context_layer_shape)
462
+
463
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
464
+
465
+ return outputs
466
+
467
+
468
class LayoutLMv3SelfOutput(nn.Module):
    """
    Post-attention projection: dense + dropout, then a residual connection with the
    attention input followed by LayerNorm (identical to RobertaSelfOutput).
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        # Residual add with the attention input, then normalize.
        return self.LayerNorm(projected + input_tensor)
481
+
482
+
483
class LayoutLMv3Attention(nn.Module):
    """
    Full attention block: self-attention followed by the output projection with its
    residual + LayerNorm.
    """

    def __init__(self, config):
        super().__init__()
        self.self = LayoutLMv3SelfAttention(config)
        self.output = LayoutLMv3SelfOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        attn_results = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        projected = self.output(attn_results[0], hidden_states)
        # Re-attach attention probabilities when they were requested.
        return (projected,) + attn_results[1:]
510
+
511
+
512
# Copied from transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2Layer with LayoutLMv2->LayoutLMv3
class LayoutLMv3Layer(nn.Module):
    """One transformer block: attention followed by a (chunked) feed-forward MLP."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = LayoutLMv3Attention(config)
        self.intermediate = LayoutLMv3Intermediate(config)
        self.output = LayoutLMv3Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        attn_results = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        attention_output = attn_results[0]

        # Keep the attention probabilities (if any) aside to re-attach after the MLP.
        extras = attn_results[1:]

        # Chunking trades memory for compute on very long sequences.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        return (layer_output,) + extras

    def feed_forward_chunk(self, attention_output):
        # Up-projection + activation, then down-projection + residual/LayerNorm.
        return self.output(self.intermediate(attention_output), attention_output)
554
+
555
+
556
class LayoutLMv3Encoder(nn.Module):
    """Stack of LayoutLMv3 layers with optional 1D and 2D relative attention biases."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LayoutLMv3Layer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

        if self.has_relative_attention_bias:
            # Learned projection from 1D relative-position buckets to per-head biases.
            self.rel_pos_bins = config.rel_pos_bins
            self.max_rel_pos = config.max_rel_pos
            self.rel_pos_bias = nn.Linear(self.rel_pos_bins, config.num_attention_heads, bias=False)

        if self.has_spatial_attention_bias:
            # Separate learned biases for horizontal (x) and vertical (y) layout offsets.
            self.max_rel_2d_pos = config.max_rel_2d_pos
            self.rel_2d_pos_bins = config.rel_2d_pos_bins
            self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
            self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)

    def relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """Map signed relative positions to bucket indices (T5-style bucketing)."""
        ret = 0
        if bidirectional:
            # Half the buckets encode sign; distances use absolute value.
            num_buckets //= 2
            ret += (relative_position > 0).long() * num_buckets
            n = torch.abs(relative_position)
        else:
            n = torch.max(-relative_position, torch.zeros_like(relative_position))
        # now n is in the range [0, inf)

        # Half of the remaining buckets cover exact small offsets one-to-one.
        max_exact = num_buckets // 2
        is_small = n < max_exact

        # The rest grow logarithmically up to max_distance, then saturate.
        val_if_large = max_exact + (
            torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
        ).to(torch.long)
        val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))

        ret += torch.where(is_small, n, val_if_large)
        return ret

    def _cal_1d_pos_emb(self, position_ids):
        # Pairwise signed distances between sequence positions.
        rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)

        rel_pos = self.relative_position_bucket(
            rel_pos_mat,
            num_buckets=self.rel_pos_bins,
            max_distance=self.max_rel_pos,
        )
        # Index the bias table and move heads to dim 1: (batch, heads, seq, seq).
        rel_pos = self.rel_pos_bias.weight.t()[rel_pos].permute(0, 3, 1, 2)
        return rel_pos.contiguous()

    def _cal_2d_pos_emb(self, bbox):
        # Use the left x-coordinate and bottom y-coordinate of each box.
        position_coord_x = bbox[:, :, 0]
        position_coord_y = bbox[:, :, 3]
        rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
        rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
        rel_pos_x = self.relative_position_bucket(
            rel_pos_x_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_y = self.relative_position_bucket(
            rel_pos_y_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_x = self.rel_pos_x_bias.weight.t()[rel_pos_x].permute(0, 3, 1, 2).contiguous()
        rel_pos_y = self.rel_pos_y_bias.weight.t()[rel_pos_y].permute(0, 3, 1, 2).contiguous()
        # Axis biases are additive.
        return rel_pos_x + rel_pos_y

    def forward(
        self,
        hidden_states,
        bbox=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        position_ids=None,
        patch_height=None,
        patch_width=None,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        # Biases are shared across layers, so compute them once up front.
        rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None
        rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None

        for idx, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[idx] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                    rel_pos,
                    rel_2d_pos,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                    rel_pos=rel_pos,
                    rel_2d_pos=rel_2d_pos,
                )

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    all_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
701
+
702
+
703
# Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate
class LayoutLMv3Intermediate(nn.Module):
    """Feed-forward up-projection followed by the configured activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be the name of an activation or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
717
+
718
+
719
# Copied from transformers.models.roberta.modeling_roberta.RobertaOutput
class LayoutLMv3Output(nn.Module):
    """Feed-forward down-projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project back to hidden size, then apply the residual connection.
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)
732
+
733
+
734
@add_start_docstrings(
    "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.",
    LAYOUTLMV3_START_DOCSTRING,
)
class LayoutLMv3Model(LayoutLMv3PreTrainedModel):
    """LayoutLMv3 backbone fusing text embeddings with image patch embeddings."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        if config.text_embed:
            self.embeddings = LayoutLMv3TextEmbeddings(config)

        if config.visual_embed:
            # use the default pre-training parameters for fine-tuning (e.g., input_size)
            # when the input_size is larger in fine-tuning, we will interpolate the position embeddings in forward
            self.patch_embed = LayoutLMv3PatchEmbeddings(config)

            size = int(config.input_size / config.patch_size)
            self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
            self.pos_embed = nn.Parameter(torch.zeros(1, size * size + 1, config.hidden_size))
            self.pos_drop = nn.Dropout(p=0.0)

            self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
            self.dropout = nn.Dropout(config.hidden_dropout_prob)

            if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
                self.init_visual_bbox(image_size=(size, size))

            self.norm = nn.LayerNorm(config.hidden_size, eps=1e-6)

        self.encoder = LayoutLMv3Encoder(config)

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def init_visual_bbox(self, image_size=(14, 14), max_len=1000):
        """
        Create the bounding boxes for the visual (patch) tokens.
        """
        # Evenly spaced grid edges on a 0..max_len coordinate system.
        visual_bbox_x = torch.div(
            torch.arange(0, max_len * (image_size[1] + 1), max_len), image_size[1], rounding_mode="trunc"
        )
        visual_bbox_y = torch.div(
            torch.arange(0, max_len * (image_size[0] + 1), max_len), image_size[0], rounding_mode="trunc"
        )
        # Each patch box is (x0, y0, x1, y1) built from consecutive grid edges.
        visual_bbox = torch.stack(
            [
                visual_bbox_x[:-1].repeat(image_size[0], 1),
                visual_bbox_y[:-1].repeat(image_size[1], 1).transpose(0, 1),
                visual_bbox_x[1:].repeat(image_size[0], 1),
                visual_bbox_y[1:].repeat(image_size[1], 1).transpose(0, 1),
            ],
            dim=-1,
        ).view(-1, 4)

        # The visual [CLS] token spans (almost) the whole page.
        cls_token_box = torch.tensor([[0 + 1, 0 + 1, max_len - 1, max_len - 1]])
        self.visual_bbox = torch.cat([cls_token_box, visual_bbox], dim=0)

    def calculate_visual_bbox(self, device, dtype, batch_size):
        # Tile the fixed patch boxes across the batch.
        visual_bbox = self.visual_bbox.repeat(batch_size, 1, 1)
        return visual_bbox.to(device).type(dtype)

    def forward_image(self, pixel_values):
        """Embed an image into a sequence of patch tokens (with a visual [CLS])."""
        embeddings = self.patch_embed(pixel_values)

        # add [CLS] token
        batch_size, seq_len, _ = embeddings.size()
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add position embeddings
        if self.pos_embed is not None:
            embeddings = embeddings + self.pos_embed

        embeddings = self.pos_drop(embeddings)
        embeddings = self.norm(embeddings)

        return embeddings

    @add_start_docstrings_to_model_forward(
        LAYOUTLMV3_MODEL_INPUTS_DOCSTRING.format("batch_size, token_sequence_length")
    )
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModel
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Resolve batch size / sequence length / device from whichever input is given.
        if input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif pixel_values is not None:
            batch_size = len(pixel_values)
            device = pixel_values.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values")

        if input_ids is not None or inputs_embeds is not None:
            # Default mask/segments/boxes for the text branch when not supplied.
            if attention_mask is None:
                attention_mask = torch.ones(((batch_size, seq_length)), device=device)
            if token_type_ids is None:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
            if bbox is None:
                bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)

            embedding_output = self.embeddings(
                input_ids=input_ids,
                bbox=bbox,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
            )

        final_bbox = final_position_ids = None
        patch_height = patch_width = None
        if pixel_values is not None:
            patch_height, patch_width = (
                int(pixel_values.shape[2] / self.config.patch_size),
                int(pixel_values.shape[3] / self.config.patch_size),
            )
            visual_embeddings = self.forward_image(pixel_values)
            # Visual tokens are always attended to.
            visual_attention_mask = torch.ones(
                (batch_size, visual_embeddings.shape[1]), dtype=torch.long, device=device
            )
            if attention_mask is not None:
                attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
            else:
                attention_mask = visual_attention_mask

            if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
                if self.config.has_spatial_attention_bias:
                    visual_bbox = self.calculate_visual_bbox(device, dtype=torch.long, batch_size=batch_size)
                    if bbox is not None:
                        final_bbox = torch.cat([bbox, visual_bbox], dim=1)
                    else:
                        final_bbox = visual_bbox

                visual_position_ids = torch.arange(
                    0, visual_embeddings.shape[1], dtype=torch.long, device=device
                ).repeat(batch_size, 1)
                if input_ids is not None or inputs_embeds is not None:
                    position_ids = torch.arange(0, input_shape[1], device=device).unsqueeze(0)
                    position_ids = position_ids.expand(input_shape)
                    final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
                else:
                    final_position_ids = visual_position_ids

            # Concatenate text tokens followed by visual tokens.
            if input_ids is not None or inputs_embeds is not None:
                embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)
            else:
                embedding_output = visual_embeddings

            embedding_output = self.LayerNorm(embedding_output)
            embedding_output = self.dropout(embedding_output)
        elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
            # Text-only path still needs positions/boxes for the relative biases.
            if self.config.has_spatial_attention_bias:
                final_bbox = bbox
            if self.config.has_relative_attention_bias:
                position_ids = self.embeddings.position_ids[:, : input_shape[1]]
                position_ids = position_ids.expand_as(input_ids)
                final_position_ids = position_ids

        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, None, device, dtype=embedding_output.dtype
        )

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        encoder_outputs = self.encoder(
            embedding_output,
            bbox=final_bbox,
            position_ids=final_position_ids,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            patch_height=patch_height,
            patch_width=patch_width,
        )

        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
987
+
988
+
989
class LayoutLMv3ClassificationHead(nn.Module):
    """
    Head for sentence-level classification tasks. Reference: RobertaClassificationHead
    """

    def __init__(self, config, pool_feature=False):
        super().__init__()
        self.pool_feature = pool_feature
        # When pooling, three concatenated feature vectors are projected down.
        in_features = config.hidden_size * 3 if pool_feature else config.hidden_size
        self.dense = nn.Linear(in_features, config.hidden_size)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, x):
        # dropout -> dense -> tanh -> dropout -> final projection to label logits
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        return self.out_proj(x)
1014
+
1015
+
1016
@add_start_docstrings(
    """
    LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g.
    for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/),
    [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and
    [Kleister-NDA](https://github.com/applicaai/kleister-nda).
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class LayoutLMv3ForTokenClassification(LayoutLMv3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.layoutlmv3 = LayoutLMv3Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Small label spaces use a plain linear classifier; larger ones use the MLP head.
        if config.num_labels < 10:
            self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        else:
            self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)

        self.init_weights()

    @add_start_docstrings_to_model_forward(
        LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
    )
    @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_values: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForTokenClassification
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]
        >>> word_labels = example["ner_tags"]

        >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv3(
            input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            pixel_values=pixel_values,
        )
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]
        # only take the text part of the output representations
        sequence_output = outputs[0][:, :seq_length]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1127
+
1128
+
1129
@add_start_docstrings(
    """
    LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as
    [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
    compute `span start logits` and `span end logits`).
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class LayoutLMv3ForQuestionAnswering(LayoutLMv3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.layoutlmv3 = LayoutLMv3Model(config)
        # Two logits per token: span start and span end.
        self.qa_outputs = LayoutLMv3ClassificationHead(config, pool_feature=False)

        self.init_weights()

    @add_start_docstrings_to_model_forward(
        LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
    )
    @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        bbox: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForQuestionAnswering
        >>> from datasets import load_dataset
        >>> import torch

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = AutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> question = "what's his name?"
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="pt")
        >>> start_positions = torch.tensor([1])
        >>> end_positions = torch.tensor([3])

        >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
        >>> loss = outputs.loss
        >>> start_scores = outputs.start_logits
        >>> end_scores = outputs.end_logits
        ```"""

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv3(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            bbox=bbox,
            pixel_values=pixel_values,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Last dim holds (start, end); split into two per-token logit vectors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1257
+
1258
+
1259
@add_start_docstrings(
    """
    LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the
    [CLS] token) e.g. for document image classification tasks such as the
    [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class LayoutLMv3ForSequenceClassification(LayoutLMv3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.layoutlmv3 = LayoutLMv3Model(config)
        self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)

        self.init_weights()

    @add_start_docstrings_to_model_forward(
        LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
    )
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        bbox: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForSequenceClassification
        >>> from datasets import load_dataset
        >>> import torch

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
        >>> sequence_label = torch.tensor([1])

        >>> outputs = model(**encoding, labels=sequence_label)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv3(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            bbox=bbox,
            pixel_values=pixel_values,
        )

        # Classify from the [CLS] (first) token representation.
        sequence_output = outputs[0][:, 0, :]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / num_labels, then cache it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py ADDED
@@ -0,0 +1,1569 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TF 2.0 LayoutLMv3 model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import collections
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFQuestionAnsweringModelOutput,
30
+ TFSequenceClassifierOutput,
31
+ TFTokenClassifierOutput,
32
+ )
33
+ from ...modeling_tf_utils import (
34
+ TFPreTrainedModel,
35
+ TFQuestionAnsweringLoss,
36
+ TFSequenceClassificationLoss,
37
+ TFTokenClassificationLoss,
38
+ get_initializer,
39
+ keras_serializable,
40
+ unpack_inputs,
41
+ )
42
+ from ...tf_utils import check_embeddings_within_bounds
43
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
44
+ from .configuration_layoutlmv3 import LayoutLMv3Config
45
+
46
+
47
# Name of the config class referenced by the auto-generated docstrings.
_CONFIG_FOR_DOC = "LayoutLMv3Config"

# Small fixed token ids used to build the model's dummy inputs (2 sequences, 3 tokens each).
_DUMMY_INPUT_IDS = [
    [7, 6, 1],
    [1, 2, 0],
]

# Matching dummy bounding boxes: one (x0, y0, x1, y1) box per dummy token above.
_DUMMY_BBOX = [
    [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
    [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
]

# Checkpoints known to ship weights for this architecture.
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/layoutlmv3-base",
    "microsoft/layoutlmv3-large",
    # See all LayoutLMv3 models at https://huggingface.co/models?filter=layoutlmv3
]

# Additive attention-mask value: positions receiving this score are effectively
# zeroed out by the subsequent softmax.
LARGE_NEGATIVE = -1e8
66
+
67
+
68
class TFLayoutLMv3PatchEmbeddings(tf.keras.layers.Layer):
    """LayoutLMv3 image (patch) embeddings: projects pixel values to a patch sequence."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        # `patch_size` may be given either as a single int or as an (h, w) pair.
        if isinstance(config.patch_size, collections.abc.Iterable):
            patch_hw = config.patch_size
        else:
            patch_hw = (config.patch_size, config.patch_size)
        self.proj = tf.keras.layers.Conv2D(
            filters=config.hidden_size,
            kernel_size=patch_hw,
            strides=patch_hw,
            padding="valid",
            data_format="channels_last",
            use_bias=True,
            kernel_initializer=get_initializer(config.initializer_range),
            name="proj",
        )
        self.hidden_size = config.hidden_size
        # Number of non-overlapping patches tiling a square `input_size` image.
        self.num_patches = (config.input_size**2) // (patch_hw[0] * patch_hw[1])

    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        # `tf.keras.layers.Conv2D` does not support `NCHW` on CPU, so convert the
        # incoming `NCHW` pixel values to `NHWC` before projecting.
        channels_last = tf.transpose(pixel_values, perm=[0, 2, 3, 1])
        patch_features = self.proj(channels_last)
        # Flatten the spatial grid into a (batch, num_patches, hidden) sequence.
        return tf.reshape(patch_features, (-1, self.num_patches, self.hidden_size))
99
+
100
+
101
class TFLayoutLMv3TextEmbeddings(tf.keras.layers.Layer):
    """
    LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings.
    """

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        # Token-id -> vector lookup table.
        self.word_embeddings = tf.keras.layers.Embedding(
            config.vocab_size,
            config.hidden_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="word_embeddings",
        )
        self.token_type_embeddings = tf.keras.layers.Embedding(
            config.type_vocab_size,
            config.hidden_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="token_type_embeddings",
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
        # Pad token id; also the offset RoBERTa-style position ids start from.
        self.padding_token_index = config.pad_token_id
        self.position_embeddings = tf.keras.layers.Embedding(
            config.max_position_embeddings,
            config.hidden_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="position_embeddings",
        )
        # 2D layout embeddings: x/y coordinate tables plus height/width tables.
        self.x_position_embeddings = tf.keras.layers.Embedding(
            config.max_2d_position_embeddings,
            config.coordinate_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="x_position_embeddings",
        )
        self.y_position_embeddings = tf.keras.layers.Embedding(
            config.max_2d_position_embeddings,
            config.coordinate_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="y_position_embeddings",
        )
        self.h_position_embeddings = tf.keras.layers.Embedding(
            config.max_2d_position_embeddings,
            config.shape_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="h_position_embeddings",
        )
        self.w_position_embeddings = tf.keras.layers.Embedding(
            config.max_2d_position_embeddings,
            config.shape_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="w_position_embeddings",
        )
        self.max_2d_positions = config.max_2d_position_embeddings

    def calculate_spatial_position_embeddings(self, bbox: tf.Tensor) -> tf.Tensor:
        """Embed each token's (x0, y0, x1, y1) box into a single layout vector.

        `bbox` is expected to have shape (batch_size, seq_length, 4); coordinate
        values must lie within [0, max_2d_position_embeddings).
        """
        try:
            left_position_ids = bbox[:, :, 0]
            upper_position_ids = bbox[:, :, 1]
            right_position_ids = bbox[:, :, 2]
            lower_position_ids = bbox[:, :, 3]
        except IndexError as exception:
            raise IndexError("Bounding box is not of shape (batch_size, seq_length, 4).") from exception

        try:
            left_position_embeddings = self.x_position_embeddings(left_position_ids)
            upper_position_embeddings = self.y_position_embeddings(upper_position_ids)
            right_position_embeddings = self.x_position_embeddings(right_position_ids)
            lower_position_embeddings = self.y_position_embeddings(lower_position_ids)
        except IndexError as exception:
            raise IndexError(
                f"The `bbox` coordinate values should be within 0-{self.max_2d_positions} range."
            ) from exception

        # Box height/width are clipped so they remain valid embedding indices.
        max_position_id = self.max_2d_positions - 1
        h_position_embeddings = self.h_position_embeddings(
            tf.clip_by_value(bbox[:, :, 3] - bbox[:, :, 1], 0, max_position_id)
        )
        w_position_embeddings = self.w_position_embeddings(
            tf.clip_by_value(bbox[:, :, 2] - bbox[:, :, 0], 0, max_position_id)
        )

        # LayoutLMv1 sums the spatial embeddings, but LayoutLMv3 concatenates them.
        spatial_position_embeddings = tf.concat(
            [
                left_position_embeddings,
                upper_position_embeddings,
                right_position_embeddings,
                lower_position_embeddings,
                h_position_embeddings,
                w_position_embeddings,
            ],
            axis=-1,
        )
        return spatial_position_embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embds: tf.Tensor) -> tf.Tensor:
        """
        We are provided embeddings directly. We cannot infer which are padded, so just generate sequential position
        ids.
        """
        input_shape = tf.shape(inputs_embds)
        sequence_length = input_shape[1]
        # Positions start right after the padding index, matching RoBERTa's convention.
        start_index = self.padding_token_index + 1
        end_index = self.padding_token_index + sequence_length + 1
        position_ids = tf.range(start_index, end_index, dtype=tf.int32)
        batch_size = input_shape[0]
        # Broadcast the same position row to every sequence in the batch.
        position_ids = tf.reshape(position_ids, (1, sequence_length))
        position_ids = tf.tile(position_ids, (batch_size, 1))
        return position_ids

    def create_position_ids_from_input_ids(self, input_ids: tf.Tensor) -> tf.Tensor:
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_token_index + 1.
        """
        # 1 for real tokens, 0 for padding; cumulative sum numbers the real tokens,
        # and multiplying by the mask keeps padding positions at 0 before the offset.
        mask = tf.cast(tf.not_equal(input_ids, self.padding_token_index), input_ids.dtype)
        position_ids = tf.cumsum(mask, axis=1) * mask
        position_ids = position_ids + self.padding_token_index
        return position_ids

    def create_position_ids(self, input_ids: tf.Tensor, inputs_embeds: tf.Tensor) -> tf.Tensor:
        # Prefer padding-aware ids from token ids; fall back to sequential ids.
        if input_ids is None:
            return self.create_position_ids_from_inputs_embeds(inputs_embeds)
        else:
            return self.create_position_ids_from_input_ids(input_ids)

    def call(
        self,
        input_ids: tf.Tensor | None = None,
        bbox: tf.Tensor = None,
        token_type_ids: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        training: bool = False,
    ) -> tf.Tensor:
        """Sum word, token-type, 1D-position and 2D-layout embeddings, then LayerNorm + dropout."""
        if position_ids is None:
            position_ids = self.create_position_ids(input_ids, inputs_embeds)

        if input_ids is not None:
            input_shape = tf.shape(input_ids)
        else:
            input_shape = tf.shape(inputs_embeds)[:-1]

        if token_type_ids is None:
            token_type_ids = tf.zeros(input_shape, dtype=position_ids.dtype)

        if inputs_embeds is None:
            # Fail fast (with a clear error) on out-of-vocabulary token ids.
            check_embeddings_within_bounds(input_ids, self.word_embeddings.input_dim)
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings += position_embeddings

        spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox)

        embeddings += spatial_position_embeddings

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings, training=training)
        return embeddings
262
+
263
+
264
class TFLayoutLMv3SelfAttention(tf.keras.layers.Layer):
    """Multi-head self-attention with optional relative (1D) and spatial (2D) biases."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # sqrt(d_head): standard scaled-dot-product normalisation factor.
        self.attention_score_normaliser = math.sqrt(self.attention_head_size)

        self.query = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="query",
        )
        self.key = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key",
        )
        self.value = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="value",
        )

        self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

    def transpose_for_scores(self, x: tf.Tensor):
        # Split the hidden dimension into (num_heads, head_size) and move the
        # heads axis forward for batched matmul.
        shape = tf.shape(x)
        new_shape = (
            shape[0],  # batch_size
            shape[1],  # seq_length
            self.num_attention_heads,
            self.attention_head_size,
        )
        x = tf.reshape(x, new_shape)
        return tf.transpose(x, perm=[0, 2, 1, 3])  # batch_size, num_heads, seq_length, attention_head_size

    def cogview_attention(self, attention_scores: tf.Tensor, alpha: Union[float, int] = 32):
        """
        https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation
        (PB-Relax). A replacement of the original tf.keras.layers.Softmax(axis=-1)(attention_scores). Seems the new
        attention_probs will result in a slower speed and a little bias. Can use
        tf.debugging.assert_near(standard_attention_probs, cogview_attention_probs, atol=1e-08) for comparison. The
        smaller atol (e.g., 1e-08), the better.
        """
        # Scale down, subtract the row max, then scale back up: numerically this is
        # the usual max-subtraction softmax trick, but performed at reduced magnitude.
        scaled_attention_scores = attention_scores / alpha
        max_value = tf.expand_dims(tf.reduce_max(scaled_attention_scores, axis=-1), axis=-1)
        new_attention_scores = (scaled_attention_scores - max_value) * alpha
        return tf.math.softmax(new_attention_scores, axis=-1)

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None,
        head_mask: tf.Tensor | None,
        output_attentions: bool,
        rel_pos: tf.Tensor | None = None,
        rel_2d_pos: tf.Tensor | None = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(self.query(hidden_states))

        # Take the dot product between "query" and "key" to get the raw attention scores.
        normalised_query_layer = query_layer / self.attention_score_normaliser
        transposed_key_layer = tf.transpose(
            key_layer, perm=[0, 1, 3, 2]
        )  # batch_size, num_heads, attention_head_size, seq_length
        attention_scores = tf.matmul(normalised_query_layer, transposed_key_layer)

        # Relative/spatial biases are divided by the same normaliser because the
        # query was pre-normalised above.
        if self.has_relative_attention_bias and self.has_spatial_attention_bias:
            attention_scores += (rel_pos + rel_2d_pos) / self.attention_score_normaliser
        elif self.has_relative_attention_bias:
            attention_scores += rel_pos / self.attention_score_normaliser

        if attention_mask is not None:
            # Apply the attention mask (is precomputed for all layers in TFLayoutLMv3Model call() function)
            attention_scores += attention_mask

        # Normalize the attention scores to probabilities.
        # Use the trick of CogView paper to stabilize training.
        attention_probs = self.cogview_attention(attention_scores)

        attention_probs = self.dropout(attention_probs, training=training)

        # Mask heads if we want to.
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(
            context_layer, perm=[0, 2, 1, 3]
        )  # batch_size, seq_length, num_heads, attention_head_size
        # Merge the heads back into a single hidden dimension.
        shape = tf.shape(context_layer)
        context_layer = tf.reshape(
            context_layer, (shape[0], shape[1], self.all_head_size)
        )  # batch_size, seq_length, num_heads * attention_head_size

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
374
+
375
+
376
# Analogous to models.roberta.modeling_tf_roberta.TFRobertaSelfOutput.
class TFLayoutLMv3SelfOutput(tf.keras.layers.Layer):
    """Projects the attention output, applies dropout, then a residual LayerNorm."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        projected = self.dense(inputs=hidden_states)
        dropped = self.dropout(inputs=projected, training=training)
        # Residual connection followed by layer normalisation.
        return self.LayerNorm(inputs=dropped + input_tensor)
393
+
394
+
395
class TFLayoutLMv3Attention(tf.keras.layers.Layer):
    """Self-attention followed by its output projection / residual block."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.self_attention = TFLayoutLMv3SelfAttention(config, name="self")
        self.self_output = TFLayoutLMv3SelfOutput(config, name="output")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None,
        head_mask: tf.Tensor | None,
        output_attentions: bool,
        rel_pos: tf.Tensor | None = None,
        rel_2d_pos: tf.Tensor | None = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
        attn_results = self.self_attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions,
            rel_pos,
            rel_2d_pos,
            training=training,
        )
        projected = self.self_output(attn_results[0], hidden_states, training=training)
        # Re-attach the attention probabilities when the caller asked for them.
        return (projected,) + attn_results[1:]
423
+
424
+
425
# Analogous to models.roberta.modeling_tf_bert.TFRobertaIntermediate.
class TFLayoutLMv3Intermediate(tf.keras.layers.Layer):
    """Feed-forward expansion: dense projection to `intermediate_size` plus activation."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.intermediate_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )

        # The activation may be given by name (string) or as a callable.
        self.intermediate_act_fn = (
            get_tf_activation(config.hidden_act) if isinstance(config.hidden_act, str) else config.hidden_act
        )

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        return self.intermediate_act_fn(self.dense(inputs=hidden_states))
444
+
445
+
446
# Analogous to models.roberta.modeling_tf_bert.TFRobertaOutput.
class TFLayoutLMv3Output(tf.keras.layers.Layer):
    """Feed-forward contraction back to `hidden_size` with dropout and residual LayerNorm."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        contracted = self.dense(inputs=hidden_states)
        dropped = self.dropout(inputs=contracted, training=training)
        # Residual connection followed by layer normalisation.
        return self.LayerNorm(inputs=dropped + input_tensor)
463
+
464
+
465
class TFLayoutLMv3Layer(tf.keras.layers.Layer):
    """One transformer block: attention sub-layer followed by the feed-forward sub-layer."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFLayoutLMv3Attention(config, name="attention")
        self.intermediate = TFLayoutLMv3Intermediate(config, name="intermediate")
        self.bert_output = TFLayoutLMv3Output(config, name="output")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None,
        head_mask: tf.Tensor | None,
        output_attentions: bool,
        rel_pos: tf.Tensor | None = None,
        rel_2d_pos: tf.Tensor | None = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
        attn_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
            training=training,
        )
        attn_hidden = attn_outputs[0]
        # Everything past index 0 is the (optional) attention probabilities.
        extras = attn_outputs[1:]
        expanded = self.intermediate(attn_hidden)
        block_output = self.bert_output(expanded, attn_hidden, training=training)
        return (block_output,) + extras
497
+
498
+
499
class TFLayoutLMv3Encoder(tf.keras.layers.Layer):
    """Stack of transformer layers, plus optional relative/spatial attention-bias tables."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.layer = [TFLayoutLMv3Layer(config, name=f"layer.{i}") for i in range(config.num_hidden_layers)]

        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

        if self.has_relative_attention_bias:
            # 1D relative-position bias: maps a bucketed relative distance to a
            # per-head bias via a bias-free dense layer over one-hot buckets.
            self.rel_pos_bins = config.rel_pos_bins
            self.max_rel_pos = config.max_rel_pos
            self.rel_pos_bias = tf.keras.layers.Dense(
                units=config.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                use_bias=False,
                name="rel_pos_bias",
            )

        if self.has_spatial_attention_bias:
            # 2D spatial bias: separate bucketed biases for x and y coordinates.
            self.max_rel_2d_pos = config.max_rel_2d_pos
            self.rel_2d_pos_bins = config.rel_2d_pos_bins
            self.rel_pos_x_bias = tf.keras.layers.Dense(
                units=config.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                use_bias=False,
                name="rel_pos_x_bias",
            )
            self.rel_pos_y_bias = tf.keras.layers.Dense(
                units=config.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                use_bias=False,
                name="rel_pos_y_bias",
            )

    def relative_position_bucket(self, relative_positions: tf.Tensor, num_buckets: int, max_distance: int):
        """Map signed relative positions to bucket indices (T5-style bucketing)."""
        # the negative relative positions are assigned to the interval [0, num_buckets / 2]
        # we deal with this by assigning absolute relative positions to the interval [0, num_buckets / 2]
        # and then offsetting the positive relative positions by num_buckets / 2 at the end
        num_buckets = num_buckets // 2
        buckets = tf.abs(relative_positions)

        # half of the buckets are for exact increments in positions
        max_exact_buckets = num_buckets // 2
        is_small = buckets < max_exact_buckets

        # the other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        buckets_log_ratio = tf.math.log(tf.cast(buckets, tf.float32) / max_exact_buckets)
        distance_log_ratio = math.log(max_distance / max_exact_buckets)
        buckets_big_offset = (
            buckets_log_ratio / distance_log_ratio * (num_buckets - max_exact_buckets)
        )  # scale is [0, num_buckets - max_exact_buckets]
        buckets_big = max_exact_buckets + buckets_big_offset  # scale is [max_exact_buckets, num_buckets]
        buckets_big = tf.cast(buckets_big, buckets.dtype)
        buckets_big = tf.minimum(buckets_big, num_buckets - 1)

        # Positive relative positions are shifted into the upper half of the range.
        return (tf.cast(relative_positions > 0, buckets.dtype) * num_buckets) + tf.where(
            is_small, buckets, buckets_big
        )

    def _cal_pos_emb(
        self,
        dense_layer: tf.keras.layers.Dense,
        position_ids: tf.Tensor,
        num_buckets: int,
        max_distance: int,
    ):
        """Compute a per-head bias tensor from pairwise bucketed position differences."""
        # Pairwise differences: (batch, seq, seq) signed relative positions.
        rel_pos_matrix = tf.expand_dims(position_ids, axis=-2) - tf.expand_dims(position_ids, axis=-1)
        rel_pos = self.relative_position_bucket(rel_pos_matrix, num_buckets, max_distance)
        rel_pos_one_hot = tf.one_hot(rel_pos, depth=num_buckets, dtype=self.compute_dtype)
        embedding = dense_layer(rel_pos_one_hot)
        # batch_size, seq_length, seq_length, num_heads --> batch_size, num_heads, seq_length, seq_length
        embedding = tf.transpose(embedding, [0, 3, 1, 2])
        embedding = tf.cast(embedding, dtype=self.compute_dtype)
        return embedding

    def _cal_1d_pos_emb(self, position_ids: tf.Tensor):
        # Sequence-order relative bias.
        return self._cal_pos_emb(self.rel_pos_bias, position_ids, self.rel_pos_bins, self.max_rel_pos)

    def _cal_2d_pos_emb(self, bbox: tf.Tensor):
        # Spatial bias built from the boxes' left (x) and bottom (y) coordinates.
        position_coord_x = bbox[:, :, 0]  # left
        position_coord_y = bbox[:, :, 3]  # bottom
        rel_pos_x = self._cal_pos_emb(
            self.rel_pos_x_bias,
            position_coord_x,
            self.rel_2d_pos_bins,
            self.max_rel_2d_pos,
        )
        rel_pos_y = self._cal_pos_emb(
            self.rel_pos_y_bias,
            position_coord_y,
            self.rel_2d_pos_bins,
            self.max_rel_2d_pos,
        )
        rel_2d_pos = rel_pos_x + rel_pos_y
        return rel_2d_pos

    def call(
        self,
        hidden_states: tf.Tensor,
        bbox: tf.Tensor | None = None,
        attention_mask: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        position_ids: tf.Tensor | None = None,
        training: bool = False,
    ) -> Union[
        TFBaseModelOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        # Biases are computed once here and shared across all layers.
        rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None
        rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                layer_head_mask,
                output_attentions,
                rel_pos=rel_pos,
                rel_2d_pos=rel_2d_pos,
                training=training,
            )

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if return_dict:
            return TFBaseModelOutput(
                last_hidden_state=hidden_states,
                hidden_states=all_hidden_states,
                attentions=all_self_attentions,
            )
        else:
            # Tuple output: drop the entries the caller did not request.
            return tuple(
                value for value in [hidden_states, all_hidden_states, all_self_attentions] if value is not None
            )
652
+
653
+
654
+ @keras_serializable
655
+ class TFLayoutLMv3MainLayer(tf.keras.layers.Layer):
656
+ config_class = LayoutLMv3Config
657
+
658
    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)

        self.config = config

        # Text branch (optional per config): word/position/layout embeddings.
        if config.text_embed:
            self.embeddings = TFLayoutLMv3TextEmbeddings(config, name="embeddings")

        # Visual branch (optional per config): patch embeddings plus their own norm/dropout.
        if config.visual_embed:
            self.patch_embed = TFLayoutLMv3PatchEmbeddings(config, name="patch_embed")
            self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
            self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")

            if config.has_relative_attention_bias or config.has_spatial_attention_bias:
                # Precompute the bounding boxes assigned to the image patches.
                image_size = config.input_size // config.patch_size
                self.init_visual_bbox(image_size=(image_size, image_size))

            self.norm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="norm")

        self.encoder = TFLayoutLMv3Encoder(config, name="encoder")
678
+
679
    def build(self, input_shape: tf.TensorShape):
        """Create the visual-branch weights that are plain variables rather than sub-layers."""
        if self.config.visual_embed:
            image_size = self.config.input_size // self.config.patch_size
            # Learnable [CLS] token prepended to the patch sequence.
            self.cls_token = self.add_weight(
                shape=(1, 1, self.config.hidden_size),
                initializer="zeros",
                trainable=True,
                dtype=tf.float32,
                name="cls_token",
            )
            # Learnable absolute position embeddings for [CLS] + all patches.
            self.pos_embed = self.add_weight(
                shape=(1, image_size * image_size + 1, self.config.hidden_size),
                initializer="zeros",
                trainable=True,
                dtype=tf.float32,
                name="pos_embed",
            )

        super().build(input_shape)
698
+
699
    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        # The word-embedding table serves as the model's input embedding layer.
        return self.embeddings.word_embeddings
701
+
702
    def set_input_embeddings(self, value: tf.Variable):
        # NOTE(review): this sets a `weight` attribute on the embedding layer,
        # mirroring other TF models in this library — confirm downstream code
        # reads `.weight` rather than the Keras `Embedding.embeddings` variable.
        self.embeddings.word_embeddings.weight = value
704
+
705
    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # Head pruning is not implemented for this TF port.
        raise NotImplementedError
712
+
713
    def init_visual_bbox(self, image_size: Tuple[int, int], max_len: int = 1000):
        """Precompute one (x0, y0, x1, y1) box per image patch, plus a box for [CLS]."""
        # We should not hardcode max_len to 1000, but it is done by the reference implementation,
        # so we keep it for compatibility with the pretrained weights. The more correct approach
        # would have been to pass on max_len=config.max_2d_position_embeddings - 1.
        height, width = image_size

        # Grid of evenly spaced x boundaries in [0, max_len], one column per patch edge.
        visual_bbox_x = tf.range(0, max_len * (width + 1), max_len) // width
        visual_bbox_x = tf.expand_dims(visual_bbox_x, axis=0)
        visual_bbox_x = tf.tile(visual_bbox_x, [width, 1])  # (width, width + 1)

        # Same for the y boundaries, laid out along the first axis.
        visual_bbox_y = tf.range(0, max_len * (height + 1), max_len) // height
        visual_bbox_y = tf.expand_dims(visual_bbox_y, axis=1)
        visual_bbox_y = tf.tile(visual_bbox_y, [1, height])  # (height + 1, height)

        # Pair consecutive boundaries into per-patch (left, top, right, bottom) boxes.
        visual_bbox = tf.stack(
            [visual_bbox_x[:, :-1], visual_bbox_y[:-1], visual_bbox_x[:, 1:], visual_bbox_y[1:]],
            axis=-1,
        )
        visual_bbox = tf.reshape(visual_bbox, [-1, 4])

        # The [CLS] token gets a near-full-image box, matching the reference weights.
        cls_token_box = tf.constant([[1, 1, max_len - 1, max_len - 1]], dtype=tf.int32)
        self.visual_bbox = tf.concat([cls_token_box, visual_bbox], axis=0)
735
+
736
+ def calculate_visual_bbox(self, batch_size: int, dtype: tf.DType):
737
+ visual_bbox = tf.expand_dims(self.visual_bbox, axis=0)
738
+ visual_bbox = tf.tile(visual_bbox, [batch_size, 1, 1])
739
+ visual_bbox = tf.cast(visual_bbox, dtype=dtype)
740
+ return visual_bbox
741
+
742
+ def embed_image(self, pixel_values: tf.Tensor) -> tf.Tensor:
743
+ embeddings = self.patch_embed(pixel_values)
744
+
745
+ # add [CLS] token
746
+ batch_size = tf.shape(embeddings)[0]
747
+ cls_tokens = tf.tile(self.cls_token, [batch_size, 1, 1])
748
+ embeddings = tf.concat([cls_tokens, embeddings], axis=1)
749
+
750
+ # add position embeddings
751
+ if getattr(self, "pos_embed", None) is not None:
752
+ embeddings += self.pos_embed
753
+
754
+ embeddings = self.norm(embeddings)
755
+ return embeddings
756
+
757
+ def get_extended_attention_mask(self, attention_mask: tf.Tensor) -> tf.Tensor:
758
+ # Adapted from transformers.modelling_utils.ModuleUtilsMixin.get_extended_attention_mask
759
+
760
+ n_dims = len(attention_mask.shape)
761
+
762
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
763
+ # ourselves in which case we just need to make it broadcastable to all heads.
764
+ if n_dims == 3:
765
+ extended_attention_mask = tf.expand_dims(attention_mask, axis=1)
766
+ elif n_dims == 2:
767
+ # Provided a padding mask of dimensions [batch_size, seq_length].
768
+ # Make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length].
769
+ extended_attention_mask = tf.expand_dims(attention_mask, axis=1) # (batch_size, 1, seq_length)
770
+ extended_attention_mask = tf.expand_dims(extended_attention_mask, axis=1) # (batch_size, 1, 1, seq_length)
771
+ else:
772
+ raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape}).")
773
+
774
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
775
+ # masked positions, this operation will create a tensor which is 0.0 for
776
+ # positions we want to attend and -10000.0 for masked positions.
777
+ # Since we are adding it to the raw scores before the softmax, this is
778
+ # effectively the same as removing these entirely.
779
+ extended_attention_mask = tf.cast(extended_attention_mask, self.compute_dtype)
780
+ extended_attention_mask = (1.0 - extended_attention_mask) * LARGE_NEGATIVE
781
+
782
+ return extended_attention_mask
783
+
784
    def get_head_mask(self, head_mask: tf.Tensor | None) -> Union[tf.Tensor, List[tf.Tensor | None]]:
        # Expand a [num_heads] or [num_layers, num_heads] mask into the 5D shape consumed by the
        # attention layers; `None` means "keep every head in every layer".
        if head_mask is None:
            return [None] * self.config.num_hidden_layers

        # NOTE(review): tf.rank returns a tensor, so these Python-level comparisons rely on eager
        # execution (or a rank known at trace time) — confirm graph-mode behavior.
        n_dims = tf.rank(head_mask)
        if n_dims == 1:
            # Gets a tensor with masks for each head (H).
            head_mask = tf.expand_dims(head_mask, axis=0)  # 1, num_heads
            head_mask = tf.expand_dims(head_mask, axis=0)  # 1, 1, num_heads
            head_mask = tf.expand_dims(head_mask, axis=-1)  # 1, 1, num_heads, 1
            head_mask = tf.expand_dims(head_mask, axis=-1)  # 1, 1, num_heads, 1, 1
            head_mask = tf.tile(
                head_mask, [self.config.num_hidden_layers, 1, 1, 1, 1]
            )  # num_hidden_layers, 1, num_heads, 1, 1
        elif n_dims == 2:
            # Gets a tensor with masks for each layer (L) and head (H).
            head_mask = tf.expand_dims(head_mask, axis=1)  # num_layers, 1, num_heads
            head_mask = tf.expand_dims(head_mask, axis=-1)  # num_layers, 1, num_heads, 1
            head_mask = tf.expand_dims(head_mask, axis=-1)  # num_layers, 1, num_heads, 1, 1
        elif n_dims != 5:
            raise ValueError(f"Wrong shape for head_mask (shape {head_mask.shape}).")
        assert tf.rank(head_mask) == 5, f"Got head_mask rank of {tf.rank(head_mask)}, but require 5."
        head_mask = tf.cast(head_mask, self.compute_dtype)
        return head_mask
808
+
809
+ @unpack_inputs
810
+ def call(
811
+ self,
812
+ input_ids: tf.Tensor | None = None,
813
+ bbox: tf.Tensor | None = None,
814
+ attention_mask: tf.Tensor | None = None,
815
+ token_type_ids: tf.Tensor | None = None,
816
+ position_ids: tf.Tensor | None = None,
817
+ head_mask: tf.Tensor | None = None,
818
+ inputs_embeds: tf.Tensor | None = None,
819
+ pixel_values: tf.Tensor | None = None,
820
+ output_attentions: Optional[bool] = None,
821
+ output_hidden_states: Optional[bool] = None,
822
+ return_dict: Optional[bool] = None,
823
+ training: bool = False,
824
+ ) -> Union[
825
+ TFBaseModelOutput,
826
+ Tuple[tf.Tensor],
827
+ Tuple[tf.Tensor, tf.Tensor],
828
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
829
+ ]:
830
+ # This method can be called with a variety of modalities:
831
+ # 1. text + layout
832
+ # 2. text + layout + image
833
+ # 3. image
834
+ # The complexity of this method is mostly just due to handling of these different modalities.
835
+
836
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
837
+ output_hidden_states = (
838
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
839
+ )
840
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
841
+
842
+ if input_ids is not None:
843
+ input_shape = tf.shape(input_ids)
844
+ batch_size = input_shape[0]
845
+ seq_length = input_shape[1]
846
+ elif inputs_embeds is not None:
847
+ input_shape = tf.shape(inputs_embeds)
848
+ batch_size = input_shape[0]
849
+ seq_length = input_shape[1]
850
+ elif pixel_values is not None:
851
+ batch_size = tf.shape(pixel_values)[0]
852
+ else:
853
+ raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values")
854
+
855
+ # Determine which integer dtype to use.
856
+ if input_ids is not None:
857
+ int_dtype = input_ids.dtype
858
+ elif bbox is not None:
859
+ int_dtype = bbox.dtype
860
+ elif attention_mask is not None:
861
+ int_dtype = attention_mask.dtype
862
+ elif token_type_ids is not None:
863
+ int_dtype = token_type_ids.dtype
864
+ else:
865
+ int_dtype = tf.int32
866
+
867
+ if input_ids is not None or inputs_embeds is not None:
868
+ if attention_mask is None:
869
+ attention_mask = tf.ones((batch_size, seq_length), dtype=int_dtype)
870
+ if token_type_ids is None:
871
+ token_type_ids = tf.zeros((batch_size, seq_length), dtype=int_dtype)
872
+ if bbox is None:
873
+ bbox = tf.zeros((batch_size, seq_length, 4), dtype=int_dtype)
874
+
875
+ embedding_output = self.embeddings(
876
+ input_ids=input_ids,
877
+ bbox=bbox,
878
+ position_ids=position_ids,
879
+ token_type_ids=token_type_ids,
880
+ inputs_embeds=inputs_embeds,
881
+ training=training,
882
+ )
883
+
884
+ final_bbox = None
885
+ final_position_ids = None
886
+ if pixel_values is not None:
887
+ # embed image
888
+ visual_embeddings = self.embed_image(pixel_values)
889
+
890
+ # calculate attention mask
891
+ visual_attention_mask = tf.ones((batch_size, tf.shape(visual_embeddings)[1]), dtype=int_dtype)
892
+ if attention_mask is None:
893
+ attention_mask = visual_attention_mask
894
+ else:
895
+ attention_mask = tf.concat([attention_mask, visual_attention_mask], axis=1)
896
+
897
+ # calculate bounding boxes
898
+ if self.config.has_spatial_attention_bias:
899
+ visual_bbox = self.calculate_visual_bbox(batch_size, int_dtype)
900
+ if bbox is None:
901
+ final_bbox = visual_bbox
902
+ else:
903
+ final_bbox = tf.concat([bbox, visual_bbox], axis=1)
904
+
905
+ # calculate position IDs
906
+ if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
907
+ visual_position_ids = tf.range(0, tf.shape(visual_embeddings)[1], dtype=int_dtype)
908
+ visual_position_ids = tf.expand_dims(visual_position_ids, axis=0)
909
+ visual_position_ids = tf.tile(visual_position_ids, [batch_size, 1])
910
+
911
+ if input_ids is not None or inputs_embeds is not None:
912
+ position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0)
913
+ position_ids = tf.tile(position_ids, [batch_size, 1])
914
+ final_position_ids = tf.concat([position_ids, visual_position_ids], axis=1)
915
+ else:
916
+ final_position_ids = visual_position_ids
917
+
918
+ # calculate embeddings
919
+ if input_ids is None and inputs_embeds is None:
920
+ embedding_output = visual_embeddings
921
+ else:
922
+ embedding_output = tf.concat([embedding_output, visual_embeddings], axis=1)
923
+ embedding_output = self.LayerNorm(embedding_output)
924
+ embedding_output = self.dropout(embedding_output, training=training)
925
+
926
+ elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
927
+ if self.config.has_relative_attention_bias:
928
+ position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0)
929
+ position_ids = tf.tile(position_ids, [batch_size, 1])
930
+ final_position_ids = position_ids
931
+
932
+ if self.config.has_spatial_attention_bias:
933
+ final_bbox = bbox
934
+
935
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask)
936
+
937
+ # Prepare head mask if needed
938
+ # 1.0 in head_mask indicate we keep the head
939
+ # attention_probs has shape batch_size x num_heads x seq_length x seq_length
940
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
941
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
942
+ head_mask = self.get_head_mask(head_mask)
943
+
944
+ encoder_outputs = self.encoder(
945
+ embedding_output,
946
+ bbox=final_bbox,
947
+ position_ids=final_position_ids,
948
+ attention_mask=extended_attention_mask,
949
+ head_mask=head_mask,
950
+ output_attentions=output_attentions,
951
+ output_hidden_states=output_hidden_states,
952
+ return_dict=return_dict,
953
+ )
954
+
955
+ sequence_output = encoder_outputs[0]
956
+
957
+ if not return_dict:
958
+ return (sequence_output,) + encoder_outputs[1:]
959
+
960
+ return TFBaseModelOutput(
961
+ last_hidden_state=sequence_output,
962
+ hidden_states=encoder_outputs.hidden_states,
963
+ attentions=encoder_outputs.attentions,
964
+ )
965
+
966
+ return TFBaseModelOutput(
967
+ last_hidden_state=sequence_output,
968
+ hidden_states=encoder_outputs.hidden_states,
969
+ attentions=encoder_outputs.attentions,
970
+ )
971
+
972
+
973
class TFLayoutLMv3PreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LayoutLMv3Config
    base_model_prefix = "layoutlmv3"

    @property
    def input_signature(self):
        # Extend the generic text-model signature with the layout (bounding-box) input.
        signature = super().input_signature
        signature["bbox"] = tf.TensorSpec((None, None, 4), tf.int32, name="bbox")
        return signature
987
+
988
+
989
# Shared class-level docstring injected into every LayoutLMv3 model via `add_start_docstrings`.
LAYOUTLMV3_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
1029
+
1030
# Per-argument docstring injected into each model's `call` via `add_start_docstrings_to_model_forward`.
LAYOUTLMV3_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
            token. See `pixel_values` for `patch_sequence_length`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)

        bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
            Bounding boxes of each input sequence tokens. Selected in the range `[0,
            config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
            format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
            y1) represents the position of the lower right corner.

            Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
            token. See `pixel_values` for `patch_sequence_length`.

        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
            config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /
            config.patch_size) * (width / config.patch_size))`.

        attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
            token. See `pixel_values` for `patch_sequence_length`.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
            token. See `pixel_values` for `patch_sequence_length`.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
            token. See `pixel_values` for `patch_sequence_length`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
1105
+
1106
+
1107
@add_start_docstrings(
    "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.",
    LAYOUTLMV3_START_DOCSTRING,
)
class TFLayoutLMv3Model(TFLayoutLMv3PreTrainedModel):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"position_ids"]

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # The whole backbone lives in a single main layer so weights can be shared with the heads.
        self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: tf.Tensor | None = None,
        bbox: tf.Tensor | None = None,
        attention_mask: tf.Tensor | None = None,
        token_type_ids: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        pixel_values: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[
        TFBaseModelOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, TFAutoModel
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = TFAutoModel.from_pretrained("microsoft/layoutlmv3-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf")

        >>> outputs = model(**encoding)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        # Pure pass-through to the main layer: the base model adds no head on top.
        return self.layoutlmv3(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
1182
+
1183
+
1184
class TFLayoutLMv3ClassificationHead(tf.keras.layers.Layer):
    """
    Head for sentence-level classification tasks. Reference: RobertaClassificationHead
    """

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            activation="tanh",
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        # Fall back to the generic hidden dropout rate when no classifier-specific rate is set.
        dropout_rate = (
            config.hidden_dropout_prob if config.classifier_dropout is None else config.classifier_dropout
        )
        self.dropout = tf.keras.layers.Dropout(dropout_rate, name="dropout")
        self.out_proj = tf.keras.layers.Dense(
            config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="out_proj",
        )

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        """Project hidden states to `num_labels` logits: dropout -> dense(tanh) -> dropout -> out_proj."""
        hidden = self.dropout(inputs, training=training)
        hidden = self.dense(hidden)
        hidden = self.dropout(hidden, training=training)
        return self.out_proj(hidden)
1216
+
1217
+
1218
@add_start_docstrings(
    """
    LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the
    [CLS] token) e.g. for document image classification tasks such as the
    [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class TFLayoutLMv3ForSequenceClassification(TFLayoutLMv3PreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"position_ids"]

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(config, **kwargs)
        self.config = config
        self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
        self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: tf.Tensor | None = None,
        attention_mask: tf.Tensor | None = None,
        token_type_ids: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        labels: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        bbox: tf.Tensor | None = None,
        pixel_values: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[
        TFSequenceClassifierOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, TFAutoModelForSequenceClassification
        >>> from datasets import load_dataset
        >>> import tensorflow as tf

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = TFAutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf")
        >>> sequence_label = tf.convert_to_tensor([1])

        >>> outputs = model(**encoding, labels=sequence_label)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv3(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            bbox=bbox,
            pixel_values=pixel_values,
            training=training,
        )
        # Classify from the final hidden state of the [CLS] token (first position).
        cls_representation = outputs[0][:, 0, :]
        logits = self.classifier(cls_representation, training=training)

        if labels is not None:
            loss = self.hf_compute_loss(labels, logits)
        else:
            loss = None

        if not return_dict:
            output = (logits,) + outputs[1:]
            if loss is None:
                return output
            return (loss,) + output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1319
+
1320
+
1321
@add_start_docstrings(
    """
    LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g.
    for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/),
    [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and
    [Kleister-NDA](https://github.com/applicaai/kleister-nda).
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class TFLayoutLMv3ForTokenClassification(TFLayoutLMv3PreTrainedModel, TFTokenClassificationLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"position_ids"]

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(config, **kwargs)
        self.num_labels = config.num_labels

        self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
        # Small label spaces use a plain linear classifier; larger ones get the full
        # dropout -> dense(tanh) -> dropout -> projection head.
        if config.num_labels < 10:
            self.classifier = tf.keras.layers.Dense(
                config.num_labels,
                kernel_initializer=get_initializer(config.initializer_range),
                name="classifier",
            )
        else:
            self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: tf.Tensor | None = None,
        bbox: tf.Tensor | None = None,
        attention_mask: tf.Tensor | None = None,
        token_type_ids: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        labels: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_values: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[
        TFTokenClassifierOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, TFAutoModelForTokenClassification
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = TFAutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]
        >>> word_labels = example["ner_tags"]

        >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="tf")

        >>> outputs = model(**encoding)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv3(
            input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            pixel_values=pixel_values,
            training=training,
        )

        # The encoder output covers text tokens followed by visual tokens; token classification
        # only labels the text part, so slice the sequence back to the text length.
        if input_ids is not None:
            seq_length = tf.shape(input_ids)[1]
        else:
            seq_length = tf.shape(inputs_embeds)[1]
        text_output = outputs[0][:, :seq_length]
        text_output = self.dropout(text_output, training=training)
        logits = self.classifier(text_output)

        if labels is not None:
            loss = self.hf_compute_loss(labels, logits)
        else:
            loss = None

        if not return_dict:
            output = (logits,) + outputs[1:]
            if loss is None:
                return output
            return (loss,) + output

        return TFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1442
+
1443
+
1444
+ @add_start_docstrings(
1445
+ """
1446
+ LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as
1447
+ [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
1448
+ compute `span start logits` and `span end logits`).
1449
+ """,
1450
+ LAYOUTLMV3_START_DOCSTRING,
1451
+ )
1452
+ class TFLayoutLMv3ForQuestionAnswering(TFLayoutLMv3PreTrainedModel, TFQuestionAnsweringLoss):
1453
+ # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
1454
+ _keys_to_ignore_on_load_unexpected = [r"position_ids"]
1455
+
1456
+ def __init__(self, config: LayoutLMv3Config, **kwargs):
1457
+ super().__init__(config, **kwargs)
1458
+
1459
+ self.num_labels = config.num_labels
1460
+
1461
+ self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
1462
+ self.qa_outputs = TFLayoutLMv3ClassificationHead(config, name="qa_outputs")
1463
+
1464
+ @unpack_inputs
1465
+ @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
1466
+ @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
1467
+ def call(
1468
+ self,
1469
+ input_ids: tf.Tensor | None = None,
1470
+ attention_mask: tf.Tensor | None = None,
1471
+ token_type_ids: tf.Tensor | None = None,
1472
+ position_ids: tf.Tensor | None = None,
1473
+ head_mask: tf.Tensor | None = None,
1474
+ inputs_embeds: tf.Tensor | None = None,
1475
+ start_positions: tf.Tensor | None = None,
1476
+ end_positions: tf.Tensor | None = None,
1477
+ output_attentions: Optional[bool] = None,
1478
+ output_hidden_states: Optional[bool] = None,
1479
+ bbox: tf.Tensor | None = None,
1480
+ pixel_values: tf.Tensor | None = None,
1481
+ return_dict: Optional[bool] = None,
1482
+ training: bool = False,
1483
+ ) -> Union[
1484
+ TFQuestionAnsweringModelOutput,
1485
+ Tuple[tf.Tensor],
1486
+ Tuple[tf.Tensor, tf.Tensor],
1487
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
1488
+ Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
1489
+ ]:
1490
+ r"""
1491
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1492
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1493
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1494
+ are not taken into account for computing the loss.
1495
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1496
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1497
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1498
+ are not taken into account for computing the loss.
1499
+
1500
+ Returns:
1501
+
1502
+ Examples:
1503
+
1504
+ ```python
1505
+ >>> from transformers import AutoProcessor, TFAutoModelForQuestionAnswering
1506
+ >>> from datasets import load_dataset
1507
+ >>> import tensorflow as tf
1508
+
1509
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
1510
+ >>> model = TFAutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")
1511
+
1512
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
1513
+ >>> example = dataset[0]
1514
+ >>> image = example["image"]
1515
+ >>> question = "what's his name?"
1516
+ >>> words = example["tokens"]
1517
+ >>> boxes = example["bboxes"]
1518
+
1519
+ >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="tf")
1520
+ >>> start_positions = tf.convert_to_tensor([1])
1521
+ >>> end_positions = tf.convert_to_tensor([3])
1522
+
1523
+ >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
1524
+ >>> loss = outputs.loss
1525
+ >>> start_scores = outputs.start_logits
1526
+ >>> end_scores = outputs.end_logits
1527
+ ```"""
1528
+
1529
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1530
+
1531
+ outputs = self.layoutlmv3(
1532
+ input_ids,
1533
+ attention_mask=attention_mask,
1534
+ token_type_ids=token_type_ids,
1535
+ position_ids=position_ids,
1536
+ head_mask=head_mask,
1537
+ inputs_embeds=inputs_embeds,
1538
+ output_attentions=output_attentions,
1539
+ output_hidden_states=output_hidden_states,
1540
+ return_dict=return_dict,
1541
+ bbox=bbox,
1542
+ pixel_values=pixel_values,
1543
+ training=training,
1544
+ )
1545
+
1546
+ sequence_output = outputs[0]
1547
+
1548
+ logits = self.qa_outputs(sequence_output, training=training)
1549
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1550
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1551
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1552
+
1553
+ loss = None
1554
+
1555
+ if start_positions is not None and end_positions is not None:
1556
+ labels = {"start_position": start_positions, "end_position": end_positions}
1557
+ loss = self.hf_compute_loss(labels, logits=(start_logits, end_logits))
1558
+
1559
+ if not return_dict:
1560
+ output = (start_logits, end_logits) + outputs[1:]
1561
+ return ((loss,) + output) if loss is not None else output
1562
+
1563
+ return TFQuestionAnsweringModelOutput(
1564
+ loss=loss,
1565
+ start_logits=start_logits,
1566
+ end_logits=end_logits,
1567
+ hidden_states=outputs.hidden_states,
1568
+ attentions=outputs.attentions,
1569
+ )
mgm/lib/python3.10/site-packages/transformers/models/layoutlmv3/tokenization_layoutlmv3.py ADDED
@@ -0,0 +1,1479 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for LayoutLMv3. Same as LayoutLMv2, but RoBERTa-like BPE tokenization instead of WordPiece."""
16
+
17
+ import json
18
+ import os
19
+ from functools import lru_cache
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import regex as re
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ EncodedInput,
28
+ PreTokenizedInput,
29
+ TextInput,
30
+ TextInputPair,
31
+ TruncationStrategy,
32
+ )
33
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ VOCAB_FILES_NAMES = {
39
+ "vocab_file": "vocab.json",
40
+ "merges_file": "merges.txt",
41
+ }
42
+
43
+ PRETRAINED_VOCAB_FILES_MAP = {
44
+ "vocab_file": {
45
+ "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/raw/main/vocab.json",
46
+ "microsoft/layoutlmv3-large": "https://huggingface.co/microsoft/layoutlmv3-large/raw/main/vocab.json",
47
+ },
48
+ "merges_file": {
49
+ "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/raw/main/merges.txt",
50
+ "microsoft/layoutlmv3-large": "https://huggingface.co/microsoft/layoutlmv3-large/raw/main/merges.txt",
51
+ },
52
+ }
53
+
54
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
55
+ "microsoft/layoutlmv3-base": 512,
56
+ "microsoft/layoutlmv3-large": 512,
57
+ }
58
+
59
+
60
+ LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING = r"""
61
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
62
+ Whether or not to encode the sequences with the special tokens relative to their model.
63
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
64
+ Activates and controls padding. Accepts the following values:
65
+
66
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
67
+ sequence if provided).
68
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
69
+ acceptable input length for the model if that argument is not provided.
70
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
71
+ lengths).
72
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
73
+ Activates and controls truncation. Accepts the following values:
74
+
75
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
76
+ to the maximum acceptable input length for the model if that argument is not provided. This will
77
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
78
+ sequences (or a batch of pairs) is provided.
79
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
80
+ maximum acceptable input length for the model if that argument is not provided. This will only
81
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
82
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
83
+ maximum acceptable input length for the model if that argument is not provided. This will only
84
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
85
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
86
+ greater than the model maximum admissible input size).
87
+ max_length (`int`, *optional*):
88
+ Controls the maximum length to use by one of the truncation/padding parameters.
89
+
90
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
91
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
92
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
93
+ stride (`int`, *optional*, defaults to 0):
94
+ If set to a number along with `max_length`, the overflowing tokens returned when
95
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
96
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
97
+ argument defines the number of overlapping tokens.
98
+ pad_to_multiple_of (`int`, *optional*):
99
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
100
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
101
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
102
+ If set, will return tensors instead of list of python integers. Acceptable values are:
103
+
104
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
105
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
106
+ - `'np'`: Return Numpy `np.ndarray` objects.
107
+ """
108
+
109
+
110
+ LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
111
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
112
+ Whether or not to encode the sequences with the special tokens relative to their model.
113
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
114
+ Activates and controls padding. Accepts the following values:
115
+
116
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
117
+ sequence if provided).
118
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
119
+ acceptable input length for the model if that argument is not provided.
120
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
121
+ lengths).
122
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
123
+ Activates and controls truncation. Accepts the following values:
124
+
125
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
126
+ to the maximum acceptable input length for the model if that argument is not provided. This will
127
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
128
+ sequences (or a batch of pairs) is provided.
129
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
130
+ maximum acceptable input length for the model if that argument is not provided. This will only
131
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
132
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
133
+ maximum acceptable input length for the model if that argument is not provided. This will only
134
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
135
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
136
+ greater than the model maximum admissible input size).
137
+ max_length (`int`, *optional*):
138
+ Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
139
+ `None`, this will use the predefined model maximum length if a maximum length is required by one of the
140
+ truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
141
+ truncation/padding to a maximum length will be deactivated.
142
+ stride (`int`, *optional*, defaults to 0):
143
+ If set to a number along with `max_length`, the overflowing tokens returned when
144
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
145
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
146
+ argument defines the number of overlapping tokens.
147
+ pad_to_multiple_of (`int`, *optional*):
148
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
149
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
150
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
151
+ If set, will return tensors instead of list of python integers. Acceptable values are:
152
+
153
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
154
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
155
+ - `'np'`: Return Numpy `np.ndarray` objects.
156
+ """
157
+
158
+
159
+ @lru_cache()
160
+ # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
161
+ def bytes_to_unicode():
162
+ """
163
+ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
164
+ characters the bpe code barfs on.
165
+
166
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
167
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
168
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
169
+ tables between utf-8 bytes and unicode strings.
170
+ """
171
+ bs = (
172
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
173
+ )
174
+ cs = bs[:]
175
+ n = 0
176
+ for b in range(2**8):
177
+ if b not in bs:
178
+ bs.append(b)
179
+ cs.append(2**8 + n)
180
+ n += 1
181
+ cs = [chr(n) for n in cs]
182
+ return dict(zip(bs, cs))
183
+
184
+
185
+ # Copied from transformers.models.roberta.tokenization_roberta.get_pairs
186
+ def get_pairs(word):
187
+ """
188
+ Return set of symbol pairs in a word.
189
+
190
+ Word is represented as tuple of symbols (symbols being variable-length strings).
191
+ """
192
+ pairs = set()
193
+ prev_char = word[0]
194
+ for char in word[1:]:
195
+ pairs.add((prev_char, char))
196
+ prev_char = char
197
+ return pairs
198
+
199
+
200
+ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
201
+ r"""
202
+ Construct a LayoutLMv3 tokenizer. Based on [`RoBERTatokenizer`] (Byte Pair Encoding or BPE).
203
+ [`LayoutLMv3Tokenizer`] can be used to turn words, word-level bounding boxes and optional word labels to
204
+ token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and optional `labels` (for token
205
+ classification).
206
+
207
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
208
+ this superclass for more information regarding those methods.
209
+
210
+ [`LayoutLMv3Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the
211
+ word-level bounding boxes into token-level bounding boxes.
212
+
213
+ Args:
214
+ vocab_file (`str`):
215
+ Path to the vocabulary file.
216
+ merges_file (`str`):
217
+ Path to the merges file.
218
+ errors (`str`, *optional*, defaults to `"replace"`):
219
+ Paradigm to follow when decoding bytes to UTF-8. See
220
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
221
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
222
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
223
+
224
+ <Tip>
225
+
226
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
227
+ sequence. The token used is the `cls_token`.
228
+
229
+ </Tip>
230
+
231
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
232
+ The end of sequence token.
233
+
234
+ <Tip>
235
+
236
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
237
+ The token used is the `sep_token`.
238
+
239
+ </Tip>
240
+
241
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
242
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
243
+ sequence classification or for a text and a question for question answering. It is also used as the last
244
+ token of a sequence built with special tokens.
245
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
246
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
247
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
248
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
249
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
250
+ token instead.
251
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
252
+ The token used for padding, for example when batching sequences of different lengths.
253
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
254
+ The token used for masking values. This is the token used when training this model with masked language
255
+ modeling. This is the token which the model will try to predict.
256
+ add_prefix_space (`bool`, *optional*, defaults to `True`):
257
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
258
+ other word. (RoBERTa tokenizer detect beginning of words by the preceding space).
259
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
260
+ The bounding box to use for the special [CLS] token.
261
+ sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
262
+ The bounding box to use for the special [SEP] token.
263
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
264
+ The bounding box to use for the special [PAD] token.
265
+ pad_token_label (`int`, *optional*, defaults to -100):
266
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
267
+ CrossEntropyLoss.
268
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
269
+ Whether or not to only label the first subword, in case word labels are provided.
270
+ """
271
+
272
+ vocab_files_names = VOCAB_FILES_NAMES
273
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
274
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
275
+ model_input_names = ["input_ids", "attention_mask", "bbox"]
276
+
277
+ def __init__(
278
+ self,
279
+ vocab_file,
280
+ merges_file,
281
+ errors="replace",
282
+ bos_token="<s>",
283
+ eos_token="</s>",
284
+ sep_token="</s>",
285
+ cls_token="<s>",
286
+ unk_token="<unk>",
287
+ pad_token="<pad>",
288
+ mask_token="<mask>",
289
+ add_prefix_space=True,
290
+ cls_token_box=[0, 0, 0, 0],
291
+ sep_token_box=[0, 0, 0, 0],
292
+ pad_token_box=[0, 0, 0, 0],
293
+ pad_token_label=-100,
294
+ only_label_first_subword=True,
295
+ **kwargs,
296
+ ):
297
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
298
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
299
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
300
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
301
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
302
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
303
+
304
+ # Mask token behave like a normal word, i.e. include the space before it
305
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
306
+
307
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
308
+ self.encoder = json.load(vocab_handle)
309
+ self.decoder = {v: k for k, v in self.encoder.items()}
310
+ self.errors = errors # how to handle errors in decoding
311
+ self.byte_encoder = bytes_to_unicode()
312
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
313
+ with open(merges_file, encoding="utf-8") as merges_handle:
314
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
315
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
316
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
317
+ self.cache = {}
318
+ self.add_prefix_space = add_prefix_space
319
+
320
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
321
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
322
+
323
+ # additional properties
324
+ self.cls_token_box = cls_token_box
325
+ self.sep_token_box = sep_token_box
326
+ self.pad_token_box = pad_token_box
327
+ self.pad_token_label = pad_token_label
328
+ self.only_label_first_subword = only_label_first_subword
329
+
330
+ super().__init__(
331
+ errors=errors,
332
+ bos_token=bos_token,
333
+ eos_token=eos_token,
334
+ unk_token=unk_token,
335
+ sep_token=sep_token,
336
+ cls_token=cls_token,
337
+ pad_token=pad_token,
338
+ mask_token=mask_token,
339
+ add_prefix_space=add_prefix_space,
340
+ cls_token_box=cls_token_box,
341
+ sep_token_box=sep_token_box,
342
+ pad_token_box=pad_token_box,
343
+ pad_token_label=pad_token_label,
344
+ only_label_first_subword=only_label_first_subword,
345
+ **kwargs,
346
+ )
347
+
348
+ @property
349
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size
350
+ def vocab_size(self):
351
+ return len(self.encoder)
352
+
353
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab
354
+ def get_vocab(self):
355
+ vocab = dict(self.encoder).copy()
356
+ vocab.update(self.added_tokens_encoder)
357
+ return vocab
358
+
359
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe
360
+ def bpe(self, token):
361
+ if token in self.cache:
362
+ return self.cache[token]
363
+ word = tuple(token)
364
+ pairs = get_pairs(word)
365
+
366
+ if not pairs:
367
+ return token
368
+
369
+ while True:
370
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
371
+ if bigram not in self.bpe_ranks:
372
+ break
373
+ first, second = bigram
374
+ new_word = []
375
+ i = 0
376
+ while i < len(word):
377
+ try:
378
+ j = word.index(first, i)
379
+ except ValueError:
380
+ new_word.extend(word[i:])
381
+ break
382
+ else:
383
+ new_word.extend(word[i:j])
384
+ i = j
385
+
386
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
387
+ new_word.append(first + second)
388
+ i += 2
389
+ else:
390
+ new_word.append(word[i])
391
+ i += 1
392
+ new_word = tuple(new_word)
393
+ word = new_word
394
+ if len(word) == 1:
395
+ break
396
+ else:
397
+ pairs = get_pairs(word)
398
+ word = " ".join(word)
399
+ self.cache[token] = word
400
+ return word
401
+
402
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize
403
+ def _tokenize(self, text):
404
+ """Tokenize a string."""
405
+ bpe_tokens = []
406
+ for token in re.findall(self.pat, text):
407
+ token = "".join(
408
+ self.byte_encoder[b] for b in token.encode("utf-8")
409
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
410
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
411
+ return bpe_tokens
412
+
413
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id
414
+ def _convert_token_to_id(self, token):
415
+ """Converts a token (str) in an id using the vocab."""
416
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
417
+
418
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token
419
+ def _convert_id_to_token(self, index):
420
+ """Converts an index (integer) in a token (str) using the vocab."""
421
+ return self.decoder.get(index)
422
+
423
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string
424
+ def convert_tokens_to_string(self, tokens):
425
+ """Converts a sequence of tokens (string) in a single string."""
426
+ text = "".join(tokens)
427
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
428
+ return text
429
+
430
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.save_vocabulary
431
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
432
+ if not os.path.isdir(save_directory):
433
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
434
+ return
435
+ vocab_file = os.path.join(
436
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
437
+ )
438
+ merge_file = os.path.join(
439
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
440
+ )
441
+
442
+ with open(vocab_file, "w", encoding="utf-8") as f:
443
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
444
+
445
+ index = 0
446
+ with open(merge_file, "w", encoding="utf-8") as writer:
447
+ writer.write("#version: 0.2\n")
448
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
449
+ if index != token_index:
450
+ logger.warning(
451
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
452
+ " Please check that the tokenizer is not corrupted!"
453
+ )
454
+ index = token_index
455
+ writer.write(" ".join(bpe_tokens) + "\n")
456
+ index += 1
457
+
458
+ return vocab_file, merge_file
459
+
460
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.build_inputs_with_special_tokens
461
+ def build_inputs_with_special_tokens(
462
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
463
+ ) -> List[int]:
464
+ """
465
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
466
+ adding special tokens. A RoBERTa sequence has the following format:
467
+
468
+ - single sequence: `<s> X </s>`
469
+ - pair of sequences: `<s> A </s></s> B </s>`
470
+
471
+ Args:
472
+ token_ids_0 (`List[int]`):
473
+ List of IDs to which the special tokens will be added.
474
+ token_ids_1 (`List[int]`, *optional*):
475
+ Optional second list of IDs for sequence pairs.
476
+
477
+ Returns:
478
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
479
+ """
480
+ if token_ids_1 is None:
481
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
482
+ cls = [self.cls_token_id]
483
+ sep = [self.sep_token_id]
484
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
485
+
486
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask
487
+ def get_special_tokens_mask(
488
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
489
+ ) -> List[int]:
490
+ """
491
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
492
+ special tokens using the tokenizer `prepare_for_model` method.
493
+
494
+ Args:
495
+ token_ids_0 (`List[int]`):
496
+ List of IDs.
497
+ token_ids_1 (`List[int]`, *optional*):
498
+ Optional second list of IDs for sequence pairs.
499
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
500
+ Whether or not the token list is already formatted with special tokens for the model.
501
+
502
+ Returns:
503
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
504
+ """
505
+ if already_has_special_tokens:
506
+ return super().get_special_tokens_mask(
507
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
508
+ )
509
+
510
+ if token_ids_1 is None:
511
+ return [1] + ([0] * len(token_ids_0)) + [1]
512
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
513
+
514
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences
515
+ def create_token_type_ids_from_sequences(
516
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
517
+ ) -> List[int]:
518
+ """
519
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
520
+ make use of token type ids, therefore a list of zeros is returned.
521
+
522
+ Args:
523
+ token_ids_0 (`List[int]`):
524
+ List of IDs.
525
+ token_ids_1 (`List[int]`, *optional*):
526
+ Optional second list of IDs for sequence pairs.
527
+
528
+ Returns:
529
+ `List[int]`: List of zeros.
530
+ """
531
+ sep = [self.sep_token_id]
532
+ cls = [self.cls_token_id]
533
+
534
+ if token_ids_1 is None:
535
+ return len(cls + token_ids_0 + sep) * [0]
536
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
537
+
538
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
539
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
540
+ # If the text starts with a token that should not be split, no space is added before the text in any case.
541
+ # It's necessary to match the fast tokenization
542
+ if (
543
+ (is_split_into_words or add_prefix_space)
544
+ and (len(text) > 0 and not text[0].isspace())
545
+ and sum([text.startswith(no_split_token) for no_split_token in self.added_tokens_encoder]) == 0
546
+ ):
547
+ text = " " + text
548
+ return (text, kwargs)
549
+
550
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.__call__
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Validates the input shapes (words vs. boxes), detects whether the input is a single example or a batch,
        then dispatches to `batch_encode_plus` or `encode_plus` accordingly.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """

        # Input type checking for clearer error
        def _is_valid_text_input(t):
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # List are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            # NOTE(review): the message grammar ("must of type") matches the upstream copied source.
            if not _is_valid_text_input(text):
                raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        # With a text_pair, `text` is the question(s): a list of strings means a batch of questions.
        # Without a text_pair, a batch is a list of lists of words.
        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        # `boxes` must always align one-to-one with the words (per example when batched).
        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")

        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
699
+
700
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.batch_encode_plus
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a batch of sequences (or pairs) together with word-level bounding boxes
        and optional word labels.

        Resolves user-facing `padding`/`truncation` arguments into concrete strategies, then delegates the actual
        encoding to `_batch_encode_plus`.
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )
759
+
760
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._batch_encode_plus
    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Batch-encode already-resolved inputs (padding/truncation strategies are concrete here).

        Offsets mapping is a fast-tokenizer-only feature, hence the explicit rejection below.
        """
        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast."
            )

        batch_outputs = self._batch_prepare_for_model(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            return_tensors=return_tensors,
            verbose=verbose,
        )

        return BatchEncoding(batch_outputs)
815
+
816
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._batch_prepare_for_model
    def _batch_prepare_for_model(
        self,
        batch_text_or_text_pairs,
        is_pair: bool = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.

        Each example is prepared individually without padding; the whole batch is then padded in a single `pad`
        call at the end, and only then converted to tensors.

        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
        """

        batch_outputs = {}
        for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
            batch_text_or_text_pair, boxes_example = example
            outputs = self.prepare_for_model(
                batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
                batch_text_or_text_pair[1] if is_pair else None,
                boxes_example,
                word_labels=word_labels[idx] if word_labels is not None else None,
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
                truncation=truncation_strategy.value,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterward
                return_attention_mask=False,  # we pad in batch afterward
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # We convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            # Accumulate per-example outputs into parallel lists, keyed by output name.
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        # Single padding pass over the collected batch (also builds the attention mask if requested).
        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

        return batch_outputs
887
+
888
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.encode
    def encode(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> List[int]:
        """
        Encode a single sequence (or pair) with its bounding boxes and return only the `input_ids`.

        Thin wrapper around `encode_plus`: all arguments are forwarded unchanged and everything but the
        `input_ids` entry of the resulting encoding is discarded.
        """
        encoded_inputs = self.encode_plus(
            text=text,
            text_pair=text_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        return encoded_inputs["input_ids"]
935
+
936
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.encode_plus
    def encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
        `__call__` should be used instead.

        Resolves the user-facing `padding`/`truncation` arguments into concrete strategies and delegates to
        `_encode_plus`.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
            text_pair (`List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._encode_plus(
            text=text,
            boxes=boxes,
            text_pair=text_pair,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )
1003
+
1004
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._encode_plus
    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Encode a single (already strategy-resolved) example by delegating to `prepare_for_model` with
        `prepend_batch_axis=True` so the result has a leading batch dimension.

        Offsets mapping is a fast-tokenizer-only feature, hence the explicit rejection below.
        """
        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast. "
                "More information on available tokenizers at "
                "https://github.com/huggingface/transformers/pull/2674"
            )

        return self.prepare_for_model(
            text=text,
            text_pair=text_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            verbose=verbose,
        )
1056
+
1057
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
        truncates sequences if overflowing while taking into account the special tokens and manages a moving window
        (with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and
        *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
        combination of arguments will raise an error.

        Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
        token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
        labeled with -100, such that they will be ignored by the loss function.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
            text_pair (`List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        # Each word is tokenized into subwords; its box (and label) is replicated once per subword so that
        # `tokens`/`token_boxes` (and `labels`) stay index-aligned throughout.
        tokens = []
        pair_tokens = []
        token_boxes = []
        pair_token_boxes = []
        labels = []

        if text_pair is None:
            if word_labels is None:
                # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
                for word, box in zip(text, boxes):
                    if len(word) < 1:  # skip empty words
                        continue
                    word_tokens = self.tokenize(word)
                    tokens.extend(word_tokens)
                    token_boxes.extend([box] * len(word_tokens))
            else:
                # CASE 2: token classification (training)
                for word, box, label in zip(text, boxes, word_labels):
                    if len(word) < 1:  # skip empty words
                        continue
                    word_tokens = self.tokenize(word)
                    tokens.extend(word_tokens)
                    token_boxes.extend([box] * len(word_tokens))
                    if self.only_label_first_subword:
                        # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                        labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
                    else:
                        labels.extend([label] * len(word_tokens))
        else:
            # CASE 3: document visual question answering (inference)
            # text = question
            # text_pair = words
            tokens = self.tokenize(text)
            # Question tokens have no real layout position: they get the pad box.
            token_boxes = [self.pad_token_box for _ in range(len(tokens))]

            for word, box in zip(text_pair, boxes):
                if len(word) < 1:  # skip empty words
                    continue
                word_tokens = self.tokenize(word)
                pair_tokens.extend(word_tokens)
                pair_token_boxes.extend([box] * len(word_tokens))

        # Create ids + pair_ids
        ids = self.convert_tokens_to_ids(tokens)
        pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None

        if (
            return_overflowing_tokens
            and truncation_strategy == TruncationStrategy.LONGEST_FIRST
            and pair_ids is not None
        ):
            raise ValueError(
                "Not possible to return overflowing tokens for pair of sequences with the "
                "`longest_first`. Please select another truncation strategy than `longest_first`, "
                "for instance `only_second` or `only_first`."
            )

        # Compute the total size of the returned encodings
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0
        total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)

        # Truncation: Handle max sequence length
        overflowing_tokens = []
        overflowing_token_boxes = []
        overflowing_labels = []
        if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
            (
                ids,
                token_boxes,
                pair_ids,
                pair_token_boxes,
                labels,
                overflowing_tokens,
                overflowing_token_boxes,
                overflowing_labels,
            ) = self.truncate_sequences(
                ids,
                token_boxes,
                pair_ids=pair_ids,
                pair_token_boxes=pair_token_boxes,
                labels=labels,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                stride=stride,
            )

        if return_token_type_ids and not add_special_tokens:
            raise ValueError(
                "Asking to return token_type_ids while setting add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to True or "
                "set return_token_type_ids to None."
            )

        # Load from model defaults
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        encoded_inputs = {}

        if return_overflowing_tokens:
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
            encoded_inputs["overflowing_labels"] = overflowing_labels
            encoded_inputs["num_truncated_tokens"] = total_len - max_length

        # Add special tokens
        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
            # Special tokens get dedicated boxes / pad labels so bbox and labels stay aligned with input_ids.
            token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
            if pair_token_boxes:
                pair_token_boxes = [self.sep_token_box] + pair_token_boxes + [self.sep_token_box]
            token_boxes = token_boxes + pair_token_boxes if pair else token_boxes
            if labels:
                labels = [self.pad_token_label] + labels + [self.pad_token_label]
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
            token_boxes = token_boxes + pair_token_boxes if pair else token_boxes

        # Build output dictionary
        encoded_inputs["input_ids"] = sequence
        encoded_inputs["bbox"] = token_boxes
        if return_token_type_ids:
            encoded_inputs["token_type_ids"] = token_type_ids
        if return_special_tokens_mask:
            if add_special_tokens:
                encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
            else:
                encoded_inputs["special_tokens_mask"] = [0] * len(sequence)

        if labels:
            encoded_inputs["labels"] = labels

        # Check lengths
        self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)

        # Padding
        if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
            encoded_inputs = self.pad(
                encoded_inputs,
                max_length=max_length,
                padding=padding_strategy.value,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

        if return_length:
            encoded_inputs["length"] = len(encoded_inputs["input_ids"])

        batch_outputs = BatchEncoding(
            encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
        )

        return batch_outputs
1269
+
1270
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.truncate_sequences
1271
+ def truncate_sequences(
1272
+ self,
1273
+ ids: List[int],
1274
+ token_boxes: List[List[int]],
1275
+ pair_ids: Optional[List[int]] = None,
1276
+ pair_token_boxes: Optional[List[List[int]]] = None,
1277
+ labels: Optional[List[int]] = None,
1278
+ num_tokens_to_remove: int = 0,
1279
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
1280
+ stride: int = 0,
1281
+ ) -> Tuple[List[int], List[int], List[int]]:
1282
+ """
1283
+ Truncates a sequence pair in-place following the strategy.
1284
+
1285
+ Args:
1286
+ ids (`List[int]`):
1287
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
1288
+ `convert_tokens_to_ids` methods.
1289
+ token_boxes (`List[List[int]]`):
1290
+ Bounding boxes of the first sequence.
1291
+ pair_ids (`List[int]`, *optional*):
1292
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
1293
+ and `convert_tokens_to_ids` methods.
1294
+ pair_token_boxes (`List[List[int]]`, *optional*):
1295
+ Bounding boxes of the second sequence.
1296
+ labels (`List[int]`, *optional*):
1297
+ Labels of the first sequence (for token classification tasks).
1298
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
1299
+ Number of tokens to remove using the truncation strategy.
1300
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
1301
+ The strategy to follow for truncation. Can be:
1302
+
1303
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1304
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
1305
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
1306
+ batch of pairs) is provided.
1307
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1308
+ maximum acceptable input length for the model if that argument is not provided. This will only
1309
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1310
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
1311
+ maximum acceptable input length for the model if that argument is not provided. This will only
1312
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1313
+ - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
1314
+ than the model maximum admissible input size).
1315
+ stride (`int`, *optional*, defaults to 0):
1316
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
1317
+ sequence returned. The value of this argument defines the number of additional tokens.
1318
+
1319
+ Returns:
1320
+ `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
1321
+ overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
1322
+ of sequences (or a batch of pairs) is provided.
1323
+ """
1324
+ if num_tokens_to_remove <= 0:
1325
+ return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
1326
+
1327
+ if not isinstance(truncation_strategy, TruncationStrategy):
1328
+ truncation_strategy = TruncationStrategy(truncation_strategy)
1329
+
1330
+ overflowing_tokens = []
1331
+ overflowing_token_boxes = []
1332
+ overflowing_labels = []
1333
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
1334
+ truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
1335
+ ):
1336
+ if len(ids) > num_tokens_to_remove:
1337
+ window_len = min(len(ids), stride + num_tokens_to_remove)
1338
+ overflowing_tokens = ids[-window_len:]
1339
+ overflowing_token_boxes = token_boxes[-window_len:]
1340
+ overflowing_labels = labels[-window_len:]
1341
+ ids = ids[:-num_tokens_to_remove]
1342
+ token_boxes = token_boxes[:-num_tokens_to_remove]
1343
+ labels = labels[:-num_tokens_to_remove]
1344
+ else:
1345
+ error_msg = (
1346
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1347
+ f"but the first sequence has a length {len(ids)}. "
1348
+ )
1349
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST:
1350
+ error_msg = (
1351
+ error_msg + "Please select another truncation strategy than "
1352
+ f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
1353
+ )
1354
+ logger.error(error_msg)
1355
+ elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
1356
+ logger.warning(
1357
+ "Be aware, overflowing tokens are not returned for the setting you have chosen,"
1358
+ f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
1359
+ "truncation strategy. So the returned list will always be empty even if some "
1360
+ "tokens have been removed."
1361
+ )
1362
+ for _ in range(num_tokens_to_remove):
1363
+ if pair_ids is None or len(ids) > len(pair_ids):
1364
+ ids = ids[:-1]
1365
+ token_boxes = token_boxes[:-1]
1366
+ labels = labels[:-1]
1367
+ else:
1368
+ pair_ids = pair_ids[:-1]
1369
+ pair_token_boxes = pair_token_boxes[:-1]
1370
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
1371
+ if len(pair_ids) > num_tokens_to_remove:
1372
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
1373
+ overflowing_tokens = pair_ids[-window_len:]
1374
+ overflowing_token_boxes = pair_token_boxes[-window_len:]
1375
+ pair_ids = pair_ids[:-num_tokens_to_remove]
1376
+ pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
1377
+ else:
1378
+ logger.error(
1379
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1380
+ f"but the second sequence has a length {len(pair_ids)}. "
1381
+ f"Please select another truncation strategy than {truncation_strategy}, "
1382
+ "for instance 'longest_first' or 'only_first'."
1383
+ )
1384
+
1385
+ return (
1386
+ ids,
1387
+ token_boxes,
1388
+ pair_ids,
1389
+ pair_token_boxes,
1390
+ labels,
1391
+ overflowing_tokens,
1392
+ overflowing_token_boxes,
1393
+ overflowing_labels,
1394
+ )
1395
+
1396
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._pad
1397
+ def _pad(
1398
+ self,
1399
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1400
+ max_length: Optional[int] = None,
1401
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1402
+ pad_to_multiple_of: Optional[int] = None,
1403
+ return_attention_mask: Optional[bool] = None,
1404
+ ) -> dict:
1405
+ """
1406
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
1407
+
1408
+ Args:
1409
+ encoded_inputs:
1410
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1411
+ max_length: maximum length of the returned list and optionally padding length (see below).
1412
+ Will truncate by taking into account the special tokens.
1413
+ padding_strategy: PaddingStrategy to use for padding.
1414
+
1415
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
1416
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
1417
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
1418
+ The tokenizer padding sides are defined in self.padding_side:
1419
+
1420
+ - 'left': pads on the left of the sequences
1421
+ - 'right': pads on the right of the sequences
1422
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
1423
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
1424
+ `>= 7.5` (Volta).
1425
+ return_attention_mask:
1426
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1427
+ """
1428
+ # Load from model defaults
1429
+ if return_attention_mask is None:
1430
+ return_attention_mask = "attention_mask" in self.model_input_names
1431
+
1432
+ required_input = encoded_inputs[self.model_input_names[0]]
1433
+
1434
+ if padding_strategy == PaddingStrategy.LONGEST:
1435
+ max_length = len(required_input)
1436
+
1437
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1438
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1439
+
1440
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
1441
+
1442
+ # Initialize attention mask if not present.
1443
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1444
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
1445
+
1446
+ if needs_to_be_padded:
1447
+ difference = max_length - len(required_input)
1448
+ if self.padding_side == "right":
1449
+ if return_attention_mask:
1450
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1451
+ if "token_type_ids" in encoded_inputs:
1452
+ encoded_inputs["token_type_ids"] = (
1453
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
1454
+ )
1455
+ if "bbox" in encoded_inputs:
1456
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
1457
+ if "labels" in encoded_inputs:
1458
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
1459
+ if "special_tokens_mask" in encoded_inputs:
1460
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1461
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
1462
+ elif self.padding_side == "left":
1463
+ if return_attention_mask:
1464
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1465
+ if "token_type_ids" in encoded_inputs:
1466
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
1467
+ "token_type_ids"
1468
+ ]
1469
+ if "bbox" in encoded_inputs:
1470
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
1471
+ if "labels" in encoded_inputs:
1472
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
1473
+ if "special_tokens_mask" in encoded_inputs:
1474
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1475
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
1476
+ else:
1477
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
1478
+
1479
+ return encoded_inputs
mgm/lib/python3.10/site-packages/transformers/models/pvt/__init__.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan,
3
+ # Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team.
4
+ # All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ from typing import TYPE_CHECKING
18
+
19
+ from ...utils import (
20
+ OptionalDependencyNotAvailable,
21
+ _LazyModule,
22
+ is_torch_available,
23
+ is_vision_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_pvt": ["PVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "PvtConfig", "PvtOnnxConfig"],
29
+ }
30
+
31
+ try:
32
+ if not is_vision_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["image_processing_pvt"] = ["PvtImageProcessor"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_pvt"] = [
46
+ "PVT_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "PvtForImageClassification",
48
+ "PvtModel",
49
+ "PvtPreTrainedModel",
50
+ ]
51
+
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_pvt import PVT_PRETRAINED_CONFIG_ARCHIVE_MAP, PvtConfig, PvtOnnxConfig
55
+
56
+ try:
57
+ if not is_vision_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .image_processing_pvt import PvtImageProcessor
63
+
64
+ try:
65
+ if not is_torch_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .modeling_pvt import (
71
+ PVT_PRETRAINED_MODEL_ARCHIVE_LIST,
72
+ PvtForImageClassification,
73
+ PvtModel,
74
+ PvtPreTrainedModel,
75
+ )
76
+
77
+ else:
78
+ import sys
79
+
80
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc ADDED
Binary file (6.63 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/convert_pvt_to_pytorch.cpython-310.pyc ADDED
Binary file (6.23 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc ADDED
Binary file (20.3 kB). View file
 
mgm/lib/python3.10/site-packages/transformers/models/pvt/configuration_pvt.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan,
3
+ # Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team.
4
+ # All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """ Pvt model configuration"""
18
+
19
+ from collections import OrderedDict
20
+ from typing import Callable, List, Mapping
21
+
22
+ from packaging import version
23
+
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...onnx import OnnxConfig
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ PVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
32
+ "pvt-tiny-224": "https://huggingface.co/Zetatech/pvt-tiny-224",
33
+ # See all PVT models at https://huggingface.co/models?filter=pvt
34
+ }
35
+
36
+
37
+ class PvtConfig(PretrainedConfig):
38
+ r"""
39
+ This is the configuration class to store the configuration of a [`PvtModel`]. It is used to instantiate an Pvt
40
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
41
+ defaults will yield a similar configuration to that of the Pvt
42
+ [Xrenya/pvt-tiny-224](https://huggingface.co/Xrenya/pvt-tiny-224) architecture.
43
+
44
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
45
+ documentation from [`PretrainedConfig`] for more information.
46
+
47
+ Args:
48
+ image_size (`int`, *optional*, defaults to 224):
49
+ The input image size
50
+ num_channels (`int`, *optional*, defaults to 3):
51
+ The number of input channels.
52
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
53
+ The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
54
+ depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
55
+ The number of layers in each encoder block.
56
+ sequence_reduction_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
57
+ Sequence reduction ratios in each encoder block.
58
+ hidden_sizes (`List[int]`, *optional*, defaults to `[64, 128, 320, 512]`):
59
+ Dimension of each of the encoder blocks.
60
+ patch_sizes (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
61
+ Patch size before each encoder block.
62
+ strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
63
+ Stride before each encoder block.
64
+ num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
65
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
66
+ mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`):
67
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
68
+ encoder blocks.
69
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
70
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
71
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
72
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
73
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
74
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
75
+ The dropout ratio for the attention probabilities.
76
+ initializer_range (`float`, *optional*, defaults to 0.02):
77
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
78
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
79
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
80
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
81
+ The epsilon used by the layer normalization layers.
82
+ qkv_bias (`bool`, *optional*, defaults to `True`):
83
+ Whether or not a learnable bias should be added to the queries, keys and values.
84
+ num_labels ('int', *optional*, defaults to 1000):
85
+ The number of classes.
86
+ Example:
87
+
88
+ ```python
89
+ >>> from transformers import PvtModel, PvtConfig
90
+
91
+ >>> # Initializing a PVT Xrenya/pvt-tiny-224 style configuration
92
+ >>> configuration = PvtConfig()
93
+
94
+ >>> # Initializing a model from the Xrenya/pvt-tiny-224 style configuration
95
+ >>> model = PvtModel(configuration)
96
+
97
+ >>> # Accessing the model configuration
98
+ >>> configuration = model.config
99
+ ```"""
100
+
101
+ model_type = "pvt"
102
+
103
+ def __init__(
104
+ self,
105
+ image_size: int = 224,
106
+ num_channels: int = 3,
107
+ num_encoder_blocks: int = 4,
108
+ depths: List[int] = [2, 2, 2, 2],
109
+ sequence_reduction_ratios: List[int] = [8, 4, 2, 1],
110
+ hidden_sizes: List[int] = [64, 128, 320, 512],
111
+ patch_sizes: List[int] = [4, 2, 2, 2],
112
+ strides: List[int] = [4, 2, 2, 2],
113
+ num_attention_heads: List[int] = [1, 2, 5, 8],
114
+ mlp_ratios: List[int] = [8, 8, 4, 4],
115
+ hidden_act: Mapping[str, Callable] = "gelu",
116
+ hidden_dropout_prob: float = 0.0,
117
+ attention_probs_dropout_prob: float = 0.0,
118
+ initializer_range: float = 0.02,
119
+ drop_path_rate: float = 0.0,
120
+ layer_norm_eps: float = 1e-6,
121
+ qkv_bias: bool = True,
122
+ num_labels: int = 1000,
123
+ **kwargs,
124
+ ):
125
+ super().__init__(**kwargs)
126
+
127
+ self.image_size = image_size
128
+ self.num_channels = num_channels
129
+ self.num_encoder_blocks = num_encoder_blocks
130
+ self.depths = depths
131
+ self.sequence_reduction_ratios = sequence_reduction_ratios
132
+ self.hidden_sizes = hidden_sizes
133
+ self.patch_sizes = patch_sizes
134
+ self.strides = strides
135
+ self.mlp_ratios = mlp_ratios
136
+ self.num_attention_heads = num_attention_heads
137
+ self.hidden_act = hidden_act
138
+ self.hidden_dropout_prob = hidden_dropout_prob
139
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
140
+ self.initializer_range = initializer_range
141
+ self.drop_path_rate = drop_path_rate
142
+ self.layer_norm_eps = layer_norm_eps
143
+ self.num_labels = num_labels
144
+ self.qkv_bias = qkv_bias
145
+
146
+
147
+ class PvtOnnxConfig(OnnxConfig):
148
+ torch_onnx_minimum_version = version.parse("1.11")
149
+
150
+ @property
151
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
152
+ return OrderedDict(
153
+ [
154
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
155
+ ]
156
+ )
157
+
158
+ @property
159
+ def atol_for_validation(self) -> float:
160
+ return 1e-4
161
+
162
+ @property
163
+ def default_onnx_opset(self) -> int:
164
+ return 12
mgm/lib/python3.10/site-packages/transformers/models/pvt/convert_pvt_to_pytorch.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan,
3
+ # Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team.
4
+ # All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """Convert Pvt checkpoints from the original library."""
18
+
19
+
20
+ import argparse
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ from PIL import Image
26
+
27
+ from transformers import PvtConfig, PvtForImageClassification, PvtImageProcessor
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ # here we list all keys to be renamed (original name on the left, our name on the right)
36
+ def create_rename_keys(config):
37
+ rename_keys = []
38
+ for i in range(config.num_encoder_blocks):
39
+ # Remane embedings' paramters
40
+ rename_keys.append((f"pos_embed{i + 1}", f"pvt.encoder.patch_embeddings.{i}.position_embeddings"))
41
+
42
+ rename_keys.append((f"patch_embed{i + 1}.proj.weight", f"pvt.encoder.patch_embeddings.{i}.projection.weight"))
43
+ rename_keys.append((f"patch_embed{i + 1}.proj.bias", f"pvt.encoder.patch_embeddings.{i}.projection.bias"))
44
+ rename_keys.append((f"patch_embed{i + 1}.norm.weight", f"pvt.encoder.patch_embeddings.{i}.layer_norm.weight"))
45
+ rename_keys.append((f"patch_embed{i + 1}.norm.bias", f"pvt.encoder.patch_embeddings.{i}.layer_norm.bias"))
46
+
47
+ for j in range(config.depths[i]):
48
+ # Rename blocks' parameters
49
+ rename_keys.append(
50
+ (f"block{i + 1}.{j}.attn.q.weight", f"pvt.encoder.block.{i}.{j}.attention.self.query.weight")
51
+ )
52
+ rename_keys.append(
53
+ (f"block{i + 1}.{j}.attn.q.bias", f"pvt.encoder.block.{i}.{j}.attention.self.query.bias")
54
+ )
55
+ rename_keys.append(
56
+ (f"block{i + 1}.{j}.attn.kv.weight", f"pvt.encoder.block.{i}.{j}.attention.self.kv.weight")
57
+ )
58
+ rename_keys.append((f"block{i + 1}.{j}.attn.kv.bias", f"pvt.encoder.block.{i}.{j}.attention.self.kv.bias"))
59
+
60
+ if config.sequence_reduction_ratios[i] > 1:
61
+ rename_keys.append(
62
+ (
63
+ f"block{i + 1}.{j}.attn.norm.weight",
64
+ f"pvt.encoder.block.{i}.{j}.attention.self.layer_norm.weight",
65
+ )
66
+ )
67
+ rename_keys.append(
68
+ (f"block{i + 1}.{j}.attn.norm.bias", f"pvt.encoder.block.{i}.{j}.attention.self.layer_norm.bias")
69
+ )
70
+ rename_keys.append(
71
+ (
72
+ f"block{i + 1}.{j}.attn.sr.weight",
73
+ f"pvt.encoder.block.{i}.{j}.attention.self.sequence_reduction.weight",
74
+ )
75
+ )
76
+ rename_keys.append(
77
+ (
78
+ f"block{i + 1}.{j}.attn.sr.bias",
79
+ f"pvt.encoder.block.{i}.{j}.attention.self.sequence_reduction.bias",
80
+ )
81
+ )
82
+
83
+ rename_keys.append(
84
+ (f"block{i + 1}.{j}.attn.proj.weight", f"pvt.encoder.block.{i}.{j}.attention.output.dense.weight")
85
+ )
86
+ rename_keys.append(
87
+ (f"block{i + 1}.{j}.attn.proj.bias", f"pvt.encoder.block.{i}.{j}.attention.output.dense.bias")
88
+ )
89
+
90
+ rename_keys.append((f"block{i + 1}.{j}.norm1.weight", f"pvt.encoder.block.{i}.{j}.layer_norm_1.weight"))
91
+ rename_keys.append((f"block{i + 1}.{j}.norm1.bias", f"pvt.encoder.block.{i}.{j}.layer_norm_1.bias"))
92
+
93
+ rename_keys.append((f"block{i + 1}.{j}.norm2.weight", f"pvt.encoder.block.{i}.{j}.layer_norm_2.weight"))
94
+ rename_keys.append((f"block{i + 1}.{j}.norm2.bias", f"pvt.encoder.block.{i}.{j}.layer_norm_2.bias"))
95
+
96
+ rename_keys.append((f"block{i + 1}.{j}.mlp.fc1.weight", f"pvt.encoder.block.{i}.{j}.mlp.dense1.weight"))
97
+ rename_keys.append((f"block{i + 1}.{j}.mlp.fc1.bias", f"pvt.encoder.block.{i}.{j}.mlp.dense1.bias"))
98
+ rename_keys.append((f"block{i + 1}.{j}.mlp.fc2.weight", f"pvt.encoder.block.{i}.{j}.mlp.dense2.weight"))
99
+ rename_keys.append((f"block{i + 1}.{j}.mlp.fc2.bias", f"pvt.encoder.block.{i}.{j}.mlp.dense2.bias"))
100
+
101
+ # Rename cls token
102
+ rename_keys.extend(
103
+ [
104
+ ("cls_token", "pvt.encoder.patch_embeddings.3.cls_token"),
105
+ ]
106
+ )
107
+ # Rename norm layer and classifier layer
108
+ rename_keys.extend(
109
+ [
110
+ ("norm.weight", "pvt.encoder.layer_norm.weight"),
111
+ ("norm.bias", "pvt.encoder.layer_norm.bias"),
112
+ ("head.weight", "classifier.weight"),
113
+ ("head.bias", "classifier.bias"),
114
+ ]
115
+ )
116
+
117
+ return rename_keys
118
+
119
+
120
+ # we split up the matrix of each encoder layer into queries, keys and values
121
+ def read_in_k_v(state_dict, config):
122
+ # for each of the encoder blocks:
123
+ for i in range(config.num_encoder_blocks):
124
+ for j in range(config.depths[i]):
125
+ # read in weights + bias of keys and values (which is a single matrix in the original implementation)
126
+ kv_weight = state_dict.pop(f"pvt.encoder.block.{i}.{j}.attention.self.kv.weight")
127
+ kv_bias = state_dict.pop(f"pvt.encoder.block.{i}.{j}.attention.self.kv.bias")
128
+ # next, add keys and values (in that order) to the state dict
129
+ state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
130
+ state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
131
+
132
+ state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
133
+ config.hidden_sizes[i] :, :
134
+ ]
135
+ state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
136
+
137
+
138
+ def rename_key(dct, old, new):
139
+ val = dct.pop(old)
140
+ dct[new] = val
141
+
142
+
143
+ # We will verify our results on an image of cute cats
144
+ def prepare_img():
145
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
146
+ im = Image.open(requests.get(url, stream=True).raw)
147
+ return im
148
+
149
+
150
+ @torch.no_grad()
151
+ def convert_pvt_checkpoint(pvt_size, pvt_checkpoint, pytorch_dump_folder_path):
152
+ """
153
+ Copy/paste/tweak model's weights to our PVT structure.
154
+ """
155
+
156
+ # define default Pvt configuration
157
+ if pvt_size == "tiny":
158
+ config_path = "Zetatech/pvt-tiny-224"
159
+ elif pvt_size == "small":
160
+ config_path = "Zetatech/pvt-small-224"
161
+ elif pvt_size == "medium":
162
+ config_path = "Zetatech/pvt-medium-224"
163
+ elif pvt_size == "large":
164
+ config_path = "Zetatech/pvt-large-224"
165
+ else:
166
+ raise ValueError(f"Available model's size: 'tiny', 'small', 'medium', 'large', but " f"'{pvt_size}' was given")
167
+ config = PvtConfig(name_or_path=config_path)
168
+ # load original model from https://github.com/whai362/PVT
169
+ state_dict = torch.load(pvt_checkpoint, map_location="cpu")
170
+
171
+ rename_keys = create_rename_keys(config)
172
+ for src, dest in rename_keys:
173
+ rename_key(state_dict, src, dest)
174
+ read_in_k_v(state_dict, config)
175
+
176
+ # load HuggingFace model
177
+ model = PvtForImageClassification(config).eval()
178
+ model.load_state_dict(state_dict)
179
+
180
+ # Check outputs on an image, prepared by PVTFeatureExtractor
181
+ image_processor = PvtImageProcessor(size=config.image_size)
182
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
183
+ pixel_values = encoding["pixel_values"]
184
+ outputs = model(pixel_values)
185
+ logits = outputs.logits.detach().cpu()
186
+
187
+ if pvt_size == "tiny":
188
+ expected_slice_logits = torch.tensor([-1.4192, -1.9158, -0.9702])
189
+ elif pvt_size == "small":
190
+ expected_slice_logits = torch.tensor([0.4353, -0.1960, -0.2373])
191
+ elif pvt_size == "medium":
192
+ expected_slice_logits = torch.tensor([-0.2914, -0.2231, 0.0321])
193
+ elif pvt_size == "large":
194
+ expected_slice_logits = torch.tensor([0.3740, -0.7739, -0.4214])
195
+ else:
196
+ raise ValueError(f"Available model's size: 'tiny', 'small', 'medium', 'large', but " f"'{pvt_size}' was given")
197
+
198
+ assert torch.allclose(logits[0, :3], expected_slice_logits, atol=1e-4)
199
+
200
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
201
+ print(f"Saving model pytorch_model.bin to {pytorch_dump_folder_path}")
202
+ model.save_pretrained(pytorch_dump_folder_path)
203
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
204
+ image_processor.save_pretrained(pytorch_dump_folder_path)
205
+
206
+
207
+ if __name__ == "__main__":
208
+ parser = argparse.ArgumentParser()
209
+ # Required parameters
210
+ parser.add_argument(
211
+ "--pvt_size",
212
+ default="tiny",
213
+ type=str,
214
+ help="Size of the PVT pretrained model you'd like to convert.",
215
+ )
216
+ parser.add_argument(
217
+ "--pvt_checkpoint",
218
+ default="pvt_tiny.pth",
219
+ type=str,
220
+ help="Checkpoint of the PVT pretrained model you'd like to convert.",
221
+ )
222
+ parser.add_argument(
223
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
224
+ )
225
+
226
+ args = parser.parse_args()
227
+ convert_pvt_checkpoint(args.pvt_size, args.pvt_checkpoint, args.pytorch_dump_folder_path)