ZTWHHH commited on
Commit
c773b79
·
verified ·
1 Parent(s): bfc0623

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc +0 -0
  3. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/__init__.cpython-310.pyc +0 -0
  4. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/feature_extraction_clip.cpython-310.pyc +0 -0
  5. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc +0 -0
  6. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc +0 -0
  7. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc +0 -0
  8. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc +0 -0
  9. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc +0 -0
  10. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py +536 -0
  11. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/__init__.cpython-310.pyc +0 -0
  12. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/configuration_deberta_v2.cpython-310.pyc +0 -0
  13. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_deberta_v2.cpython-310.pyc +0 -0
  14. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_tf_deberta_v2.cpython-310.pyc +0 -0
  15. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2.cpython-310.pyc +0 -0
  16. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2_fast.cpython-310.pyc +0 -0
  17. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/configuration_deberta_v2.py +200 -0
  18. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py +1633 -0
  19. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +1875 -0
  20. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2.py +550 -0
  21. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py +250 -0
  22. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__init__.py +93 -0
  23. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/__init__.cpython-310.pyc +0 -0
  24. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/configuration_squeezebert.cpython-310.pyc +0 -0
  25. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/modeling_squeezebert.cpython-310.pyc +0 -0
  26. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert.cpython-310.pyc +0 -0
  27. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert_fast.cpython-310.pyc +0 -0
  28. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/configuration_squeezebert.py +177 -0
  29. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/modeling_squeezebert.py +1090 -0
  30. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/tokenization_squeezebert.py +531 -0
  31. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/tokenization_squeezebert_fast.py +212 -0
  32. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__init__.py +85 -0
  33. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/configuration_vilt.cpython-310.pyc +0 -0
  34. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/convert_vilt_original_to_pytorch.cpython-310.pyc +0 -0
  35. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/feature_extraction_vilt.cpython-310.pyc +0 -0
  36. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/processing_vilt.cpython-310.pyc +0 -0
  37. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/configuration_vilt.py +148 -0
  38. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/convert_vilt_original_to_pytorch.py +300 -0
  39. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/feature_extraction_vilt.py +33 -0
  40. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/image_processing_vilt.py +483 -0
  41. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/modeling_vilt.py +1489 -0
  42. evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/processing_vilt.py +148 -0
  43. evalkit_tf446/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 +3 -0
  44. evalkit_tf449/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 +3 -0
  45. infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/Index.svelte +167 -0
  46. infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/CopyAll.svelte.d.ts +17 -0
  47. infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/FlagActive.svelte.d.ts +23 -0
  48. infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/MessageBox.svelte.d.ts +20 -0
  49. infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/Pending.svelte +126 -0
  50. infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/Pending.svelte.d.ts +18 -0
.gitattributes CHANGED
@@ -1572,3 +1572,5 @@ infer_4_47_1/lib/python3.10/site-packages/PIL/_imaging.cpython-310-x86_64-linux-
1572
  infer_4_47_1/lib/python3.10/site-packages/PIL/_imagingft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1573
  falcon/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.11 filter=lfs diff=lfs merge=lfs -text
1574
  infer_4_47_1/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.MHH1IQIS.js.br filter=lfs diff=lfs merge=lfs -text
 
 
 
1572
  infer_4_47_1/lib/python3.10/site-packages/PIL/_imagingft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1573
  falcon/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.11 filter=lfs diff=lfs merge=lfs -text
1574
  infer_4_47_1/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.MHH1IQIS.js.br filter=lfs diff=lfs merge=lfs -text
1575
+ evalkit_tf446/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 filter=lfs diff=lfs merge=lfs -text
1576
+ evalkit_tf449/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.72 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.52 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/feature_extraction_clip.cpython-310.pyc ADDED
Binary file (991 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc ADDED
Binary file (13.3 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc ADDED
Binary file (41.3 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc ADDED
Binary file (38.5 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc ADDED
Binary file (6.18 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc ADDED
Binary file (6.36 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py ADDED
@@ -0,0 +1,536 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CLIP."""
16
+
17
+ import json
18
+ import os
19
+ import unicodedata
20
+ from functools import lru_cache
21
+ from typing import List, Optional, Tuple
22
+
23
+ import regex as re
24
+
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {
32
+ "vocab_file": "vocab.json",
33
+ "merges_file": "merges.txt",
34
+ }
35
+
36
+ PRETRAINED_VOCAB_FILES_MAP = {
37
+ "vocab_file": {
38
+ "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json",
39
+ },
40
+ "merges_file": {
41
+ "openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt",
42
+ },
43
+ }
44
+
45
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
46
+ "openai/clip-vit-base-patch32": 77,
47
+ }
48
+
49
+
50
+ PRETRAINED_INIT_CONFIGURATION = {
51
+ "openai/clip-vit-base-patch32": {},
52
+ }
53
+
54
+
55
+ @lru_cache()
56
+ def bytes_to_unicode():
57
+ """
58
+ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
59
+ characters the bpe code barfs on.
60
+
61
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
62
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
63
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
64
+ tables between utf-8 bytes and unicode strings.
65
+ """
66
+ bs = (
67
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
68
+ )
69
+ cs = bs[:]
70
+ n = 0
71
+ for b in range(2**8):
72
+ if b not in bs:
73
+ bs.append(b)
74
+ cs.append(2**8 + n)
75
+ n += 1
76
+ cs = [chr(n) for n in cs]
77
+ return dict(zip(bs, cs))
78
+
79
+
80
+ def get_pairs(word):
81
+ """
82
+ Return set of symbol pairs in a word.
83
+
84
+ Word is represented as tuple of symbols (symbols being variable-length strings).
85
+ """
86
+ pairs = set()
87
+ prev_char = word[0]
88
+ for char in word[1:]:
89
+ pairs.add((prev_char, char))
90
+ prev_char = char
91
+ return pairs
92
+
93
+
94
+ def whitespace_clean(text):
95
+ text = re.sub(r"\s+", " ", text)
96
+ text = text.strip()
97
+ return text
98
+
99
+
100
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
101
+ def whitespace_tokenize(text):
102
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
103
+ text = text.strip()
104
+ if not text:
105
+ return []
106
+ tokens = text.split()
107
+ return tokens
108
+
109
+
110
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
111
+ class BasicTokenizer(object):
112
+ """
113
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
114
+
115
+ Args:
116
+ do_lower_case (`bool`, *optional*, defaults to `True`):
117
+ Whether or not to lowercase the input when tokenizing.
118
+ never_split (`Iterable`, *optional*):
119
+ Collection of tokens which will never be split during tokenization. Only has an effect when
120
+ `do_basic_tokenize=True`
121
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
122
+ Whether or not to tokenize Chinese characters.
123
+
124
+ This should likely be deactivated for Japanese (see this
125
+ [issue](https://github.com/huggingface/transformers/issues/328)).
126
+ strip_accents (`bool`, *optional*):
127
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
128
+ value for `lowercase` (as in the original BERT).
129
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
130
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
131
+ the full context of the words, such as contractions.
132
+ """
133
+
134
+ def __init__(
135
+ self,
136
+ do_lower_case=True,
137
+ never_split=None,
138
+ tokenize_chinese_chars=True,
139
+ strip_accents=None,
140
+ do_split_on_punc=True,
141
+ ):
142
+ if never_split is None:
143
+ never_split = []
144
+ self.do_lower_case = do_lower_case
145
+ self.never_split = set(never_split)
146
+ self.tokenize_chinese_chars = tokenize_chinese_chars
147
+ self.strip_accents = strip_accents
148
+ self.do_split_on_punc = do_split_on_punc
149
+
150
+ def tokenize(self, text, never_split=None):
151
+ """
152
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
153
+
154
+ Args:
155
+ never_split (`List[str]`, *optional*)
156
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
157
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
158
+ """
159
+ # union() returns a new set by concatenating the two sets.
160
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
161
+ text = self._clean_text(text)
162
+
163
+ # This was added on November 1st, 2018 for the multilingual and Chinese
164
+ # models. This is also applied to the English models now, but it doesn't
165
+ # matter since the English models were not trained on any Chinese data
166
+ # and generally don't have any Chinese data in them (there are Chinese
167
+ # characters in the vocabulary because Wikipedia does have some Chinese
168
+ # words in the English Wikipedia.).
169
+ if self.tokenize_chinese_chars:
170
+ text = self._tokenize_chinese_chars(text)
171
+ # prevents treating the same character with different unicode codepoints as different characters
172
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
173
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
174
+ split_tokens = []
175
+ for token in orig_tokens:
176
+ if token not in never_split:
177
+ if self.do_lower_case:
178
+ token = token.lower()
179
+ if self.strip_accents is not False:
180
+ token = self._run_strip_accents(token)
181
+ elif self.strip_accents:
182
+ token = self._run_strip_accents(token)
183
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
184
+
185
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
186
+ return output_tokens
187
+
188
+ def _run_strip_accents(self, text):
189
+ """Strips accents from a piece of text."""
190
+ text = unicodedata.normalize("NFD", text)
191
+ output = []
192
+ for char in text:
193
+ cat = unicodedata.category(char)
194
+ if cat == "Mn":
195
+ continue
196
+ output.append(char)
197
+ return "".join(output)
198
+
199
+ def _run_split_on_punc(self, text, never_split=None):
200
+ """Splits punctuation on a piece of text."""
201
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
202
+ return [text]
203
+ chars = list(text)
204
+ i = 0
205
+ start_new_word = True
206
+ output = []
207
+ while i < len(chars):
208
+ char = chars[i]
209
+ if _is_punctuation(char):
210
+ output.append([char])
211
+ start_new_word = True
212
+ else:
213
+ if start_new_word:
214
+ output.append([])
215
+ start_new_word = False
216
+ output[-1].append(char)
217
+ i += 1
218
+
219
+ return ["".join(x) for x in output]
220
+
221
+ def _tokenize_chinese_chars(self, text):
222
+ """Adds whitespace around any CJK character."""
223
+ output = []
224
+ for char in text:
225
+ cp = ord(char)
226
+ if self._is_chinese_char(cp):
227
+ output.append(" ")
228
+ output.append(char)
229
+ output.append(" ")
230
+ else:
231
+ output.append(char)
232
+ return "".join(output)
233
+
234
+ def _is_chinese_char(self, cp):
235
+ """Checks whether CP is the codepoint of a CJK character."""
236
+ # This defines a "chinese character" as anything in the CJK Unicode block:
237
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
238
+ #
239
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
240
+ # despite its name. The modern Korean Hangul alphabet is a different block,
241
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
242
+ # space-separated words, so they are not treated specially and handled
243
+ # like the all of the other languages.
244
+ if (
245
+ (cp >= 0x4E00 and cp <= 0x9FFF)
246
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
247
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
248
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
249
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
250
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
251
+ or (cp >= 0xF900 and cp <= 0xFAFF)
252
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
253
+ ): #
254
+ return True
255
+
256
+ return False
257
+
258
+ def _clean_text(self, text):
259
+ """Performs invalid character removal and whitespace cleanup on text."""
260
+ output = []
261
+ for char in text:
262
+ cp = ord(char)
263
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
264
+ continue
265
+ if _is_whitespace(char):
266
+ output.append(" ")
267
+ else:
268
+ output.append(char)
269
+ return "".join(output)
270
+
271
+
272
+ class CLIPTokenizer(PreTrainedTokenizer):
273
+ """
274
+ Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding.
275
+
276
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
277
+ this superclass for more information regarding those methods.
278
+
279
+ Args:
280
+ vocab_file (`str`):
281
+ Path to the vocabulary file.
282
+ merges_file (`str`):
283
+ Path to the merges file.
284
+ errors (`str`, *optional*, defaults to `"replace"`):
285
+ Paradigm to follow when decoding bytes to UTF-8. See
286
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
287
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
288
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
289
+ token instead.
290
+ bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
291
+ The beginning of sequence token.
292
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
293
+ The end of sequence token.
294
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
295
+ The token used for padding, for example when batching sequences of different lengths.
296
+ """
297
+
298
+ vocab_files_names = VOCAB_FILES_NAMES
299
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
300
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
301
+ model_input_names = ["input_ids", "attention_mask"]
302
+
303
+ def __init__(
304
+ self,
305
+ vocab_file,
306
+ merges_file,
307
+ errors="replace",
308
+ unk_token="<|endoftext|>",
309
+ bos_token="<|startoftext|>",
310
+ eos_token="<|endoftext|>",
311
+ pad_token="<|endoftext|>", # hack to enable padding
312
+ **kwargs,
313
+ ):
314
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
315
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
316
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
317
+ try:
318
+ import ftfy
319
+
320
+ self.fix_text = ftfy.fix_text
321
+ except ImportError:
322
+ logger.info("ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy.")
323
+ self.nlp = BasicTokenizer(strip_accents=False, do_split_on_punc=False)
324
+ self.fix_text = None
325
+
326
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
327
+ self.encoder = json.load(vocab_handle)
328
+ self.decoder = {v: k for k, v in self.encoder.items()}
329
+ self.errors = errors # how to handle errors in decoding
330
+ self.byte_encoder = bytes_to_unicode()
331
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
332
+ with open(merges_file, encoding="utf-8") as merges_handle:
333
+ bpe_merges = merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
334
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
335
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
336
+ self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"}
337
+
338
+ self.pat = re.compile(
339
+ r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
340
+ re.IGNORECASE,
341
+ )
342
+
343
+ super().__init__(
344
+ errors=errors,
345
+ unk_token=unk_token,
346
+ bos_token=bos_token,
347
+ eos_token=eos_token,
348
+ pad_token=pad_token,
349
+ **kwargs,
350
+ )
351
+
352
+ @property
353
+ def vocab_size(self):
354
+ return len(self.encoder)
355
+
356
+ def get_vocab(self):
357
+ return dict(self.encoder, **self.added_tokens_encoder)
358
+
359
+ def build_inputs_with_special_tokens(
360
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
361
+ ) -> List[int]:
362
+ """
363
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
364
+ adding special tokens. A CLIP sequence has the following format:
365
+
366
+ - single sequence: `<|startoftext|> X <|endoftext|>`
367
+
368
+ Pairs of sequences are not the expected use case, but they will be handled without a separator.
369
+
370
+ Args:
371
+ token_ids_0 (`List[int]`):
372
+ List of IDs to which the special tokens will be added.
373
+ token_ids_1 (`List[int]`, *optional*):
374
+ Optional second list of IDs for sequence pairs.
375
+
376
+ Returns:
377
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
378
+ """
379
+ bos_token = [self.bos_token_id]
380
+ eos_token = [self.eos_token_id]
381
+
382
+ if token_ids_1 is None:
383
+ return bos_token + token_ids_0 + eos_token
384
+ return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token
385
+
386
+ def get_special_tokens_mask(
387
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
388
+ ) -> List[int]:
389
+ """
390
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
391
+ special tokens using the tokenizer `prepare_for_model` method.
392
+
393
+ Args:
394
+ token_ids_0 (`List[int]`):
395
+ List of IDs.
396
+ token_ids_1 (`List[int]`, *optional*):
397
+ Optional second list of IDs for sequence pairs.
398
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
399
+ Whether or not the token list is already formatted with special tokens for the model.
400
+
401
+ Returns:
402
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
403
+ """
404
+
405
+ if already_has_special_tokens:
406
+ return super().get_special_tokens_mask(
407
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
408
+ )
409
+
410
+ if token_ids_1 is None:
411
+ return [1] + ([0] * len(token_ids_0)) + [1]
412
+ return [1] + ([0] * len(token_ids_0)) + [1] + [1] + ([0] * len(token_ids_1)) + [1]
413
+
414
+ def create_token_type_ids_from_sequences(
415
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
416
+ ) -> List[int]:
417
+ """
418
+ Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of
419
+ zeros is returned.
420
+
421
+ Args:
422
+ token_ids_0 (`List[int]`):
423
+ List of IDs.
424
+ token_ids_1 (`List[int]`, *optional*):
425
+ Optional second list of IDs for sequence pairs.
426
+
427
+ Returns:
428
+ `List[int]`: List of zeros.
429
+ """
430
+ bos_token = [self.bos_token_id]
431
+ eos_token = [self.eos_token_id]
432
+
433
+ if token_ids_1 is None:
434
+ return len(bos_token + token_ids_0 + eos_token) * [0]
435
+ return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0]
436
+
437
+ def bpe(self, token):
438
+ if token in self.cache:
439
+ return self.cache[token]
440
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
441
+ pairs = get_pairs(word)
442
+
443
+ if not pairs:
444
+ return token + "</w>"
445
+
446
+ while True:
447
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
448
+ if bigram not in self.bpe_ranks:
449
+ break
450
+ first, second = bigram
451
+ new_word = []
452
+ i = 0
453
+ while i < len(word):
454
+ try:
455
+ j = word.index(first, i)
456
+ except ValueError:
457
+ new_word.extend(word[i:])
458
+ break
459
+ else:
460
+ new_word.extend(word[i:j])
461
+ i = j
462
+
463
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
464
+ new_word.append(first + second)
465
+ i += 2
466
+ else:
467
+ new_word.append(word[i])
468
+ i += 1
469
+ new_word = tuple(new_word)
470
+ word = new_word
471
+ if len(word) == 1:
472
+ break
473
+ else:
474
+ pairs = get_pairs(word)
475
+ word = " ".join(word)
476
+ self.cache[token] = word
477
+ return word
478
+
479
+ def _tokenize(self, text):
480
+ """Tokenize a string."""
481
+ bpe_tokens = []
482
+ if self.fix_text is None:
483
+ text = " ".join(self.nlp.tokenize(text))
484
+ else:
485
+ text = whitespace_clean(self.fix_text(text)).lower()
486
+
487
+ for token in re.findall(self.pat, text):
488
+ token = "".join(
489
+ self.byte_encoder[b] for b in token.encode("utf-8")
490
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
491
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
492
+ return bpe_tokens
493
+
494
+ def _convert_token_to_id(self, token):
495
+ """Converts a token (str) in an id using the vocab."""
496
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
497
+
498
+ def _convert_id_to_token(self, index):
499
+ """Converts an index (integer) in a token (str) using the vocab."""
500
+ return self.decoder.get(index)
501
+
502
+ def convert_tokens_to_string(self, tokens):
503
+ """Converts a sequence of tokens (string) in a single string."""
504
+ text = "".join(tokens)
505
+ byte_array = bytearray([self.byte_decoder[c] for c in text])
506
+ text = byte_array.decode("utf-8", errors=self.errors).replace("</w>", " ").strip()
507
+ return text
508
+
509
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
510
+ if not os.path.isdir(save_directory):
511
+ logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
512
+ return
513
+ vocab_file = os.path.join(
514
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
515
+ )
516
+ merge_file = os.path.join(
517
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
518
+ )
519
+
520
+ with open(vocab_file, "w", encoding="utf-8") as f:
521
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
522
+
523
+ index = 0
524
+ with open(merge_file, "w", encoding="utf-8") as writer:
525
+ writer.write("#version: 0.2\n")
526
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
527
+ if index != token_index:
528
+ logger.warning(
529
+ "Saving vocabulary to {}: BPE merge indices are not consecutive."
530
+ " Please check that the tokenizer is not corrupted!".format(merge_file)
531
+ )
532
+ index = token_index
533
+ writer.write(" ".join(bpe_tokens) + "\n")
534
+ index += 1
535
+
536
+ return vocab_file, merge_file
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.98 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/configuration_deberta_v2.cpython-310.pyc ADDED
Binary file (8.4 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_deberta_v2.cpython-310.pyc ADDED
Binary file (45.7 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_tf_deberta_v2.cpython-310.pyc ADDED
Binary file (56.3 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2.cpython-310.pyc ADDED
Binary file (20.1 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2_fast.cpython-310.pyc ADDED
Binary file (9.66 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/configuration_deberta_v2.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020, Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DeBERTa-v2 model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ if TYPE_CHECKING:
25
+ from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
31
+ "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
32
+ "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
33
+ "microsoft/deberta-v2-xlarge-mnli": (
34
+ "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
35
+ ),
36
+ "microsoft/deberta-v2-xxlarge-mnli": (
37
+ "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
38
+ ),
39
+ }
40
+
41
+
42
+ class DebertaV2Config(PretrainedConfig):
43
+ r"""
44
+ This is the configuration class to store the configuration of a [`DebertaV2Model`]. It is used to instantiate a
45
+ DeBERTa-v2 model according to the specified arguments, defining the model architecture. Instantiating a
46
+ configuration with the defaults will yield a similar configuration to that of the DeBERTa
47
+ [microsoft/deberta-v2-xlarge](https://huggingface.co/microsoft/deberta-v2-xlarge) architecture.
48
+
49
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
50
+ documentation from [`PretrainedConfig`] for more information.
51
+
52
+ Arguments:
53
+ vocab_size (`int`, *optional*, defaults to 128100):
54
+ Vocabulary size of the DeBERTa-v2 model. Defines the number of different tokens that can be represented by
55
+ the `inputs_ids` passed when calling [`DebertaV2Model`].
56
+ hidden_size (`int`, *optional*, defaults to 1536):
57
+ Dimensionality of the encoder layers and the pooler layer.
58
+ num_hidden_layers (`int`, *optional*, defaults to 24):
59
+ Number of hidden layers in the Transformer encoder.
60
+ num_attention_heads (`int`, *optional*, defaults to 24):
61
+ Number of attention heads for each attention layer in the Transformer encoder.
62
+ intermediate_size (`int`, *optional*, defaults to 6144):
63
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
64
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
65
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
66
+ `"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
67
+ are supported.
68
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
69
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
70
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
71
+ The dropout ratio for the attention probabilities.
72
+ max_position_embeddings (`int`, *optional*, defaults to 512):
73
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
74
+ just in case (e.g., 512 or 1024 or 2048).
75
+ type_vocab_size (`int`, *optional*, defaults to 0):
76
+ The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
77
+ initializer_range (`float`, *optional*, defaults to 0.02):
78
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
79
+ layer_norm_eps (`float`, *optional*, defaults to 1e-7):
80
+ The epsilon used by the layer normalization layers.
81
+ relative_attention (`bool`, *optional*, defaults to `True`):
82
+ Whether use relative position encoding.
83
+ max_relative_positions (`int`, *optional*, defaults to -1):
84
+ The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
85
+ as `max_position_embeddings`.
86
+ pad_token_id (`int`, *optional*, defaults to 0):
87
+ The value used to pad input_ids.
88
+ position_biased_input (`bool`, *optional*, defaults to `False`):
89
+ Whether add absolute position embedding to content embedding.
90
+ pos_att_type (`List[str]`, *optional*):
91
+ The type of relative position attention, it can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]`,
92
+ `["p2c", "c2p"]`, `["p2c", "c2p"]`.
93
+ layer_norm_eps (`float`, optional, defaults to 1e-12):
94
+ The epsilon used by the layer normalization layers.
95
+
96
+ Example:
97
+
98
+ ```python
99
+ >>> from transformers import DebertaV2Config, DebertaV2Model
100
+
101
+ >>> # Initializing a DeBERTa-v2 microsoft/deberta-v2-xlarge style configuration
102
+ >>> configuration = DebertaV2Config()
103
+
104
+ >>> # Initializing a model (with random weights) from the microsoft/deberta-v2-xlarge style configuration
105
+ >>> model = DebertaV2Model(configuration)
106
+
107
+ >>> # Accessing the model configuration
108
+ >>> configuration = model.config
109
+ ```"""
110
+
111
+ model_type = "deberta-v2"
112
+
113
+ def __init__(
114
+ self,
115
+ vocab_size=128100,
116
+ hidden_size=1536,
117
+ num_hidden_layers=24,
118
+ num_attention_heads=24,
119
+ intermediate_size=6144,
120
+ hidden_act="gelu",
121
+ hidden_dropout_prob=0.1,
122
+ attention_probs_dropout_prob=0.1,
123
+ max_position_embeddings=512,
124
+ type_vocab_size=0,
125
+ initializer_range=0.02,
126
+ layer_norm_eps=1e-7,
127
+ relative_attention=False,
128
+ max_relative_positions=-1,
129
+ pad_token_id=0,
130
+ position_biased_input=True,
131
+ pos_att_type=None,
132
+ pooler_dropout=0,
133
+ pooler_hidden_act="gelu",
134
+ **kwargs,
135
+ ):
136
+ super().__init__(**kwargs)
137
+
138
+ self.hidden_size = hidden_size
139
+ self.num_hidden_layers = num_hidden_layers
140
+ self.num_attention_heads = num_attention_heads
141
+ self.intermediate_size = intermediate_size
142
+ self.hidden_act = hidden_act
143
+ self.hidden_dropout_prob = hidden_dropout_prob
144
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
145
+ self.max_position_embeddings = max_position_embeddings
146
+ self.type_vocab_size = type_vocab_size
147
+ self.initializer_range = initializer_range
148
+ self.relative_attention = relative_attention
149
+ self.max_relative_positions = max_relative_positions
150
+ self.pad_token_id = pad_token_id
151
+ self.position_biased_input = position_biased_input
152
+
153
+ # Backwards compatibility
154
+ if isinstance(pos_att_type, str):
155
+ pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
156
+
157
+ self.pos_att_type = pos_att_type
158
+ self.vocab_size = vocab_size
159
+ self.layer_norm_eps = layer_norm_eps
160
+
161
+ self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
162
+ self.pooler_dropout = pooler_dropout
163
+ self.pooler_hidden_act = pooler_hidden_act
164
+
165
+
166
+ class DebertaV2OnnxConfig(OnnxConfig):
167
+ @property
168
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
169
+ if self.task == "multiple-choice":
170
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
171
+ else:
172
+ dynamic_axis = {0: "batch", 1: "sequence"}
173
+ if self._config.type_vocab_size > 0:
174
+ return OrderedDict(
175
+ [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
176
+ )
177
+ else:
178
+ return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
179
+
180
+ @property
181
+ def default_onnx_opset(self) -> int:
182
+ return 12
183
+
184
+ def generate_dummy_inputs(
185
+ self,
186
+ preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
187
+ batch_size: int = -1,
188
+ seq_length: int = -1,
189
+ num_choices: int = -1,
190
+ is_pair: bool = False,
191
+ framework: Optional["TensorType"] = None,
192
+ num_channels: int = 3,
193
+ image_width: int = 40,
194
+ image_height: int = 40,
195
+ tokenizer: "PreTrainedTokenizerBase" = None,
196
+ ) -> Mapping[str, Any]:
197
+ dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
198
+ if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
199
+ del dummy_inputs["token_type_ids"]
200
+ return dummy_inputs
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py ADDED
@@ -0,0 +1,1633 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the Hugging Face Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DeBERTa-v2 model."""
16
+
17
+ from collections.abc import Sequence
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ MaskedLMOutput,
29
+ MultipleChoiceModelOutput,
30
+ QuestionAnsweringModelOutput,
31
+ SequenceClassifierOutput,
32
+ TokenClassifierOutput,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...pytorch_utils import softmax_backward_data
36
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
37
+ from .configuration_deberta_v2 import DebertaV2Config
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CONFIG_FOR_DOC = "DebertaV2Config"
43
+ _CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge"
44
+ _QA_TARGET_START_INDEX = 2
45
+ _QA_TARGET_END_INDEX = 9
46
+
47
+ DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
48
+ "microsoft/deberta-v2-xlarge",
49
+ "microsoft/deberta-v2-xxlarge",
50
+ "microsoft/deberta-v2-xlarge-mnli",
51
+ "microsoft/deberta-v2-xxlarge-mnli",
52
+ ]
53
+
54
+
55
+ # Copied from transformers.models.deberta.modeling_deberta.ContextPooler
56
+ class ContextPooler(nn.Module):
57
+ def __init__(self, config):
58
+ super().__init__()
59
+ self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
60
+ self.dropout = StableDropout(config.pooler_dropout)
61
+ self.config = config
62
+
63
+ def forward(self, hidden_states):
64
+ # We "pool" the model by simply taking the hidden state corresponding
65
+ # to the first token.
66
+
67
+ context_token = hidden_states[:, 0]
68
+ context_token = self.dropout(context_token)
69
+ pooled_output = self.dense(context_token)
70
+ pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
71
+ return pooled_output
72
+
73
+ @property
74
+ def output_dim(self):
75
+ return self.config.hidden_size
76
+
77
+
78
+ # Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2
79
+ class XSoftmax(torch.autograd.Function):
80
+ """
81
+ Masked Softmax which is optimized for saving memory
82
+
83
+ Args:
84
+ input (`torch.tensor`): The input tensor that will apply softmax.
85
+ mask (`torch.IntTensor`):
86
+ The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
87
+ dim (int): The dimension that will apply softmax
88
+
89
+ Example:
90
+
91
+ ```python
92
+ >>> import torch
93
+ >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax
94
+
95
+ >>> # Make a tensor
96
+ >>> x = torch.randn([4, 20, 100])
97
+
98
+ >>> # Create a mask
99
+ >>> mask = (x > 0).int()
100
+
101
+ >>> # Specify the dimension to apply softmax
102
+ >>> dim = -1
103
+
104
+ >>> y = XSoftmax.apply(x, mask, dim)
105
+ ```"""
106
+
107
+ @staticmethod
108
+ def forward(self, input, mask, dim):
109
+ self.dim = dim
110
+ rmask = ~(mask.to(torch.bool))
111
+
112
+ output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
113
+ output = torch.softmax(output, self.dim)
114
+ output.masked_fill_(rmask, 0)
115
+ self.save_for_backward(output)
116
+ return output
117
+
118
+ @staticmethod
119
+ def backward(self, grad_output):
120
+ (output,) = self.saved_tensors
121
+ inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
122
+ return inputGrad, None, None
123
+
124
+ @staticmethod
125
+ def symbolic(g, self, mask, dim):
126
+ import torch.onnx.symbolic_helper as sym_help
127
+ from torch.onnx.symbolic_opset9 import masked_fill, softmax
128
+
129
+ mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
130
+ r_mask = g.op(
131
+ "Cast",
132
+ g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
133
+ to_i=sym_help.cast_pytorch_to_onnx["Bool"],
134
+ )
135
+ output = masked_fill(
136
+ g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
137
+ )
138
+ output = softmax(g, output, dim)
139
+ return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
140
+
141
+
142
+ # Copied from transformers.models.deberta.modeling_deberta.DropoutContext
143
+ class DropoutContext(object):
144
+ def __init__(self):
145
+ self.dropout = 0
146
+ self.mask = None
147
+ self.scale = 1
148
+ self.reuse_mask = True
149
+
150
+
151
+ # Copied from transformers.models.deberta.modeling_deberta.get_mask
152
+ def get_mask(input, local_context):
153
+ if not isinstance(local_context, DropoutContext):
154
+ dropout = local_context
155
+ mask = None
156
+ else:
157
+ dropout = local_context.dropout
158
+ dropout *= local_context.scale
159
+ mask = local_context.mask if local_context.reuse_mask else None
160
+
161
+ if dropout > 0 and mask is None:
162
+ mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
163
+
164
+ if isinstance(local_context, DropoutContext):
165
+ if local_context.mask is None:
166
+ local_context.mask = mask
167
+
168
+ return mask, dropout
169
+
170
+
171
+ # Copied from transformers.models.deberta.modeling_deberta.XDropout
172
+ class XDropout(torch.autograd.Function):
173
+ """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
174
+
175
+ @staticmethod
176
+ def forward(ctx, input, local_ctx):
177
+ mask, dropout = get_mask(input, local_ctx)
178
+ ctx.scale = 1.0 / (1 - dropout)
179
+ if dropout > 0:
180
+ ctx.save_for_backward(mask)
181
+ return input.masked_fill(mask, 0) * ctx.scale
182
+ else:
183
+ return input
184
+
185
+ @staticmethod
186
+ def backward(ctx, grad_output):
187
+ if ctx.scale > 1:
188
+ (mask,) = ctx.saved_tensors
189
+ return grad_output.masked_fill(mask, 0) * ctx.scale, None
190
+ else:
191
+ return grad_output, None
192
+
193
+ @staticmethod
194
+ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
195
+ from torch.onnx import symbolic_opset12
196
+
197
+ dropout_p = local_ctx
198
+ if isinstance(local_ctx, DropoutContext):
199
+ dropout_p = local_ctx.dropout
200
+ # StableDropout only calls this function when training.
201
+ train = True
202
+ # TODO: We should check if the opset_version being used to export
203
+ # is > 12 here, but there's no good way to do that. As-is, if the
204
+ # opset_version < 12, export will fail with a CheckerError.
205
+ # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
206
+ # if opset_version < 12:
207
+ # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
208
+ return symbolic_opset12.dropout(g, input, dropout_p, train)
209
+
210
+
211
+ # Copied from transformers.models.deberta.modeling_deberta.StableDropout
212
+ class StableDropout(nn.Module):
213
+ """
214
+ Optimized dropout module for stabilizing the training
215
+
216
+ Args:
217
+ drop_prob (float): the dropout probabilities
218
+ """
219
+
220
+ def __init__(self, drop_prob):
221
+ super().__init__()
222
+ self.drop_prob = drop_prob
223
+ self.count = 0
224
+ self.context_stack = None
225
+
226
+ def forward(self, x):
227
+ """
228
+ Call the module
229
+
230
+ Args:
231
+ x (`torch.tensor`): The input tensor to apply dropout
232
+ """
233
+ if self.training and self.drop_prob > 0:
234
+ return XDropout.apply(x, self.get_context())
235
+ return x
236
+
237
+ def clear_context(self):
238
+ self.count = 0
239
+ self.context_stack = None
240
+
241
+ def init_context(self, reuse_mask=True, scale=1):
242
+ if self.context_stack is None:
243
+ self.context_stack = []
244
+ self.count = 0
245
+ for c in self.context_stack:
246
+ c.reuse_mask = reuse_mask
247
+ c.scale = scale
248
+
249
+ def get_context(self):
250
+ if self.context_stack is not None:
251
+ if self.count >= len(self.context_stack):
252
+ self.context_stack.append(DropoutContext())
253
+ ctx = self.context_stack[self.count]
254
+ ctx.dropout = self.drop_prob
255
+ self.count += 1
256
+ return ctx
257
+ else:
258
+ return self.drop_prob
259
+
260
+
261
+ # Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm
262
+ class DebertaV2SelfOutput(nn.Module):
263
+ def __init__(self, config):
264
+ super().__init__()
265
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
266
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
267
+ self.dropout = StableDropout(config.hidden_dropout_prob)
268
+
269
+ def forward(self, hidden_states, input_tensor):
270
+ hidden_states = self.dense(hidden_states)
271
+ hidden_states = self.dropout(hidden_states)
272
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
273
+ return hidden_states
274
+
275
+
276
+ # Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2
277
+ class DebertaV2Attention(nn.Module):
278
+ def __init__(self, config):
279
+ super().__init__()
280
+ self.self = DisentangledSelfAttention(config)
281
+ self.output = DebertaV2SelfOutput(config)
282
+ self.config = config
283
+
284
+ def forward(
285
+ self,
286
+ hidden_states,
287
+ attention_mask,
288
+ output_attentions=False,
289
+ query_states=None,
290
+ relative_pos=None,
291
+ rel_embeddings=None,
292
+ ):
293
+ self_output = self.self(
294
+ hidden_states,
295
+ attention_mask,
296
+ output_attentions,
297
+ query_states=query_states,
298
+ relative_pos=relative_pos,
299
+ rel_embeddings=rel_embeddings,
300
+ )
301
+ if output_attentions:
302
+ self_output, att_matrix = self_output
303
+ if query_states is None:
304
+ query_states = hidden_states
305
+ attention_output = self.output(self_output, query_states)
306
+
307
+ if output_attentions:
308
+ return (attention_output, att_matrix)
309
+ else:
310
+ return attention_output
311
+
312
+
313
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2
314
+ class DebertaV2Intermediate(nn.Module):
315
+ def __init__(self, config):
316
+ super().__init__()
317
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
318
+ if isinstance(config.hidden_act, str):
319
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
320
+ else:
321
+ self.intermediate_act_fn = config.hidden_act
322
+
323
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
324
+ hidden_states = self.dense(hidden_states)
325
+ hidden_states = self.intermediate_act_fn(hidden_states)
326
+ return hidden_states
327
+
328
+
329
+ # Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm
330
+ class DebertaV2Output(nn.Module):
331
+ def __init__(self, config):
332
+ super().__init__()
333
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
334
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
335
+ self.dropout = StableDropout(config.hidden_dropout_prob)
336
+ self.config = config
337
+
338
+ def forward(self, hidden_states, input_tensor):
339
+ hidden_states = self.dense(hidden_states)
340
+ hidden_states = self.dropout(hidden_states)
341
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
342
+ return hidden_states
343
+
344
+
345
+ # Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2
346
+ class DebertaV2Layer(nn.Module):
347
+ def __init__(self, config):
348
+ super().__init__()
349
+ self.attention = DebertaV2Attention(config)
350
+ self.intermediate = DebertaV2Intermediate(config)
351
+ self.output = DebertaV2Output(config)
352
+
353
+ def forward(
354
+ self,
355
+ hidden_states,
356
+ attention_mask,
357
+ query_states=None,
358
+ relative_pos=None,
359
+ rel_embeddings=None,
360
+ output_attentions=False,
361
+ ):
362
+ attention_output = self.attention(
363
+ hidden_states,
364
+ attention_mask,
365
+ output_attentions=output_attentions,
366
+ query_states=query_states,
367
+ relative_pos=relative_pos,
368
+ rel_embeddings=rel_embeddings,
369
+ )
370
+ if output_attentions:
371
+ attention_output, att_matrix = attention_output
372
+ intermediate_output = self.intermediate(attention_output)
373
+ layer_output = self.output(intermediate_output, attention_output)
374
+ if output_attentions:
375
+ return (layer_output, att_matrix)
376
+ else:
377
+ return layer_output
378
+
379
+
380
+ class ConvLayer(nn.Module):
381
+ def __init__(self, config):
382
+ super().__init__()
383
+ kernel_size = getattr(config, "conv_kernel_size", 3)
384
+ groups = getattr(config, "conv_groups", 1)
385
+ self.conv_act = getattr(config, "conv_act", "tanh")
386
+ self.conv = nn.Conv1d(
387
+ config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
388
+ )
389
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
390
+ self.dropout = StableDropout(config.hidden_dropout_prob)
391
+ self.config = config
392
+
393
+ def forward(self, hidden_states, residual_states, input_mask):
394
+ out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
395
+ rmask = (1 - input_mask).bool()
396
+ out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
397
+ out = ACT2FN[self.conv_act](self.dropout(out))
398
+
399
+ layer_norm_input = residual_states + out
400
+ output = self.LayerNorm(layer_norm_input).to(layer_norm_input)
401
+
402
+ if input_mask is None:
403
+ output_states = output
404
+ else:
405
+ if input_mask.dim() != layer_norm_input.dim():
406
+ if input_mask.dim() == 4:
407
+ input_mask = input_mask.squeeze(1).squeeze(1)
408
+ input_mask = input_mask.unsqueeze(2)
409
+
410
+ input_mask = input_mask.to(output.dtype)
411
+ output_states = output * input_mask
412
+
413
+ return output_states
414
+
415
+
416
+ class DebertaV2Encoder(nn.Module):
417
+ """Modified BertEncoder with relative position bias support"""
418
+
419
+ def __init__(self, config):
420
+ super().__init__()
421
+
422
+ self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
423
+ self.relative_attention = getattr(config, "relative_attention", False)
424
+
425
+ if self.relative_attention:
426
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
427
+ if self.max_relative_positions < 1:
428
+ self.max_relative_positions = config.max_position_embeddings
429
+
430
+ self.position_buckets = getattr(config, "position_buckets", -1)
431
+ pos_ebd_size = self.max_relative_positions * 2
432
+
433
+ if self.position_buckets > 0:
434
+ pos_ebd_size = self.position_buckets * 2
435
+
436
+ self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
437
+
438
+ self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
439
+
440
+ if "layer_norm" in self.norm_rel_ebd:
441
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
442
+
443
+ self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
444
+ self.gradient_checkpointing = False
445
+
446
+ def get_rel_embedding(self):
447
+ rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
448
+ if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
449
+ rel_embeddings = self.LayerNorm(rel_embeddings)
450
+ return rel_embeddings
451
+
452
+ def get_attention_mask(self, attention_mask):
453
+ if attention_mask.dim() <= 2:
454
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
455
+ attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
456
+ elif attention_mask.dim() == 3:
457
+ attention_mask = attention_mask.unsqueeze(1)
458
+
459
+ return attention_mask
460
+
461
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
462
+ if self.relative_attention and relative_pos is None:
463
+ q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
464
+ relative_pos = build_relative_position(
465
+ q,
466
+ hidden_states.size(-2),
467
+ bucket_size=self.position_buckets,
468
+ max_position=self.max_relative_positions,
469
+ device=hidden_states.device,
470
+ )
471
+ return relative_pos
472
+
473
+ def forward(
474
+ self,
475
+ hidden_states,
476
+ attention_mask,
477
+ output_hidden_states=True,
478
+ output_attentions=False,
479
+ query_states=None,
480
+ relative_pos=None,
481
+ return_dict=True,
482
+ ):
483
+ if attention_mask.dim() <= 2:
484
+ input_mask = attention_mask
485
+ else:
486
+ input_mask = attention_mask.sum(-2) > 0
487
+ attention_mask = self.get_attention_mask(attention_mask)
488
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
489
+
490
+ all_hidden_states = () if output_hidden_states else None
491
+ all_attentions = () if output_attentions else None
492
+
493
+ if isinstance(hidden_states, Sequence):
494
+ next_kv = hidden_states[0]
495
+ else:
496
+ next_kv = hidden_states
497
+ rel_embeddings = self.get_rel_embedding()
498
+ output_states = next_kv
499
+ for i, layer_module in enumerate(self.layer):
500
+ if output_hidden_states:
501
+ all_hidden_states = all_hidden_states + (output_states,)
502
+
503
+ if self.gradient_checkpointing and self.training:
504
+ output_states = self._gradient_checkpointing_func(
505
+ layer_module.__call__,
506
+ next_kv,
507
+ attention_mask,
508
+ query_states,
509
+ relative_pos,
510
+ rel_embeddings,
511
+ output_attentions,
512
+ )
513
+ else:
514
+ output_states = layer_module(
515
+ next_kv,
516
+ attention_mask,
517
+ query_states=query_states,
518
+ relative_pos=relative_pos,
519
+ rel_embeddings=rel_embeddings,
520
+ output_attentions=output_attentions,
521
+ )
522
+
523
+ if output_attentions:
524
+ output_states, att_m = output_states
525
+
526
+ if i == 0 and self.conv is not None:
527
+ output_states = self.conv(hidden_states, output_states, input_mask)
528
+
529
+ if query_states is not None:
530
+ query_states = output_states
531
+ if isinstance(hidden_states, Sequence):
532
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
533
+ else:
534
+ next_kv = output_states
535
+
536
+ if output_attentions:
537
+ all_attentions = all_attentions + (att_m,)
538
+
539
+ if output_hidden_states:
540
+ all_hidden_states = all_hidden_states + (output_states,)
541
+
542
+ if not return_dict:
543
+ return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
544
+ return BaseModelOutput(
545
+ last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
546
+ )
547
+
548
+
549
def make_log_bucket_position(relative_pos, bucket_size, max_position):
    """Map signed relative positions into log-spaced buckets.

    Positions whose magnitude is at most ``bucket_size // 2`` keep their exact
    value; larger magnitudes are compressed logarithmically into the remaining
    bucket range, preserving the sign of the original offset.
    """
    half = bucket_size // 2
    direction = torch.sign(relative_pos)
    # Inside the exact window the magnitude is irrelevant (a placeholder of
    # half - 1 keeps the log well-defined); outside it we use |pos|.
    inside_window = (relative_pos > -half) & (relative_pos < half)
    magnitude = torch.where(
        inside_window,
        torch.tensor(half - 1).type_as(relative_pos),
        torch.abs(relative_pos),
    )
    # Logarithmic compression of [half, max_position) onto (half, bucket_size).
    scaled_log = torch.log(magnitude / half) / torch.log(torch.tensor((max_position - 1) / half))
    log_bucket = torch.ceil(scaled_log * (half - 1)) + half
    return torch.where(magnitude <= half, relative_pos.type_as(log_bucket), log_bucket * direction)
562
+
563
+
564
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None):
    """
    Build relative position according to the query and key.

    We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
    \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -
    P_k\\)

    Args:
        query_size (int): the length of query
        key_size (int): the length of key
        bucket_size (int): the size of position bucket; log-bucketing is applied only
            when both ``bucket_size`` and ``max_position`` are positive
        max_position (int): the maximum allowed absolute position
        device (`torch.device`): the device on which tensors will be created.

    Return:
        `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
    """
    query_ids = torch.arange(query_size, device=device)
    key_ids = torch.arange(key_size, device=device)
    # Outer difference: entry [q, k] = q - k.
    rel = query_ids.unsqueeze(-1) - key_ids.unsqueeze(0)
    if bucket_size > 0 and max_position > 0:
        rel = make_log_bucket_position(rel, bucket_size, max_position)
    rel = rel.to(torch.long)[:query_size, :]
    return rel.unsqueeze(0)
592
+
593
+
594
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    """Broadcast the clamped content->position indices to the attention-score shape."""
    target_shape = [query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]
    return c2p_pos.expand(target_shape)
598
+
599
+
600
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    """Broadcast the position->content indices to a (batch, head, key, key) shape."""
    key_len = key_layer.size(-2)
    target_shape = [query_layer.size(0), query_layer.size(1), key_len, key_len]
    return c2p_pos.expand(target_shape)
604
+
605
+
606
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    """Broadcast a position index over the leading dims of the p2c attention scores."""
    prefix = p2c_att.size()[:2]
    return pos_index.expand(prefix + (pos_index.size(-2), key_layer.size(-2)))
610
+
611
+
612
class DisentangledSelfAttention(nn.Module):
    """
    Disentangled self-attention module.

    Attention scores are the sum of up to three components selected by
    ``config.pos_att_type``: content->content (always), content->position
    ("c2p") and position->content ("p2c").

    Parameters:
        config (`DebertaV2Config`):
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*, for more details, please refer [`DebertaV2Config`]

    """

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        # Per-head size may be overridden by the config; defaults to hidden / heads.
        _attention_head_size = config.hidden_size // config.num_attention_heads
        self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
        self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
        self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)

        # share_att_key: reuse the content query/key projections for the relative
        # position embeddings instead of dedicated pos_key_proj / pos_query_proj.
        self.share_att_key = getattr(config, "share_att_key", False)
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, "relative_attention", False)

        if self.relative_attention:
            self.position_buckets = getattr(config, "position_buckets", -1)
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            # Span of the relative-embedding table per direction (buckets win if set).
            self.pos_ebd_size = self.max_relative_positions
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets

            self.pos_dropout = StableDropout(config.hidden_dropout_prob)

            if not self.share_att_key:
                if "c2p" in self.pos_att_type:
                    self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
                if "p2c" in self.pos_att_type:
                    self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = StableDropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x, attention_heads):
        # (batch, seq, all_head) -> (batch * heads, seq, head_size); batch and head
        # dims are folded together so the attention math can use batched torch.bmm.
        new_x_shape = x.size()[:-1] + (attention_heads, -1)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
    ):
        """
        Call the module

        Args:
            hidden_states (`torch.FloatTensor`):
                Input states to the module usually the output from previous layer, it will be the Q,K and V in
                *Attention(Q,K,V)*

            attention_mask (`torch.BoolTensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
                sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
                th token.

            output_attentions (`bool`, optional):
                Whether return the attention matrix.

            query_states (`torch.FloatTensor`, optional):
                The *Q* state in *Attention(Q,K,V)*.

            relative_pos (`torch.LongTensor`):
                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
                values ranging in [*-max_relative_positions*, *max_relative_positions*].

            rel_embeddings (`torch.FloatTensor`):
                The embedding of relative distances. It's a tensor of shape [\\(2 \\times
                \\text{max_relative_positions}\\), *hidden_size*].


        """
        if query_states is None:
            query_states = hidden_states
        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)

        rel_att = None
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # scale_factor counts how many score components get summed (content plus any
        # enabled disentangled terms) so the 1/sqrt(d * scale_factor) scaling keeps
        # the summed scores at roughly unit variance.
        scale_factor = 1
        if "c2p" in self.pos_att_type:
            scale_factor += 1
        if "p2c" in self.pos_att_type:
            scale_factor += 1
        scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
        attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2) / scale.to(dtype=query_layer.dtype))
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_attention_bias(
                query_layer, key_layer, relative_pos, rel_embeddings, scale_factor
            )

        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        attention_scores = attention_scores  # NOTE(review): no-op self-assignment kept from upstream
        attention_scores = attention_scores.view(
            -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
        )

        # bsz x height x length x dimension
        # XSoftmax applies the attention mask and the softmax as one fused autograd op.
        attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.bmm(
            attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer
        )
        # Un-fold the (batch * heads) dim, then move heads back next to head_size
        # before flattening to (batch, seq, all_head_size).
        context_layer = (
            context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))
            .permute(0, 2, 1, 3)
            .contiguous()
        )
        new_context_layer_shape = context_layer.size()[:-2] + (-1,)
        context_layer = context_layer.view(new_context_layer_shape)
        if output_attentions:
            return (context_layer, attention_probs)
        else:
            return context_layer

    def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        # Build relative positions lazily when the caller did not supply them.
        if relative_pos is None:
            q = query_layer.size(-2)
            relative_pos = build_relative_position(
                q,
                key_layer.size(-2),
                bucket_size=self.position_buckets,
                max_position=self.max_relative_positions,
                device=query_layer.device,
            )
        if relative_pos.dim() == 2:
            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
        elif relative_pos.dim() == 3:
            relative_pos = relative_pos.unsqueeze(1)
        # bsz x height x query x key
        elif relative_pos.dim() != 4:
            raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")

        att_span = self.pos_ebd_size
        relative_pos = relative_pos.long().to(query_layer.device)

        # Keep only the usable window of the table: indices [0, 2 * att_span).
        rel_embeddings = rel_embeddings[0 : att_span * 2, :].unsqueeze(0)
        if self.share_att_key:
            # Reuse the content projections for positions; repeat over batch because
            # query_layer is already folded to (batch * heads, seq, head_size).
            pos_query_layer = self.transpose_for_scores(
                self.query_proj(rel_embeddings), self.num_attention_heads
            ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
            pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(
                query_layer.size(0) // self.num_attention_heads, 1, 1
            )
        else:
            if "c2p" in self.pos_att_type:
                pos_key_layer = self.transpose_for_scores(
                    self.pos_key_proj(rel_embeddings), self.num_attention_heads
                ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)  # .split(self.all_head_size, dim=-1)
            if "p2c" in self.pos_att_type:
                pos_query_layer = self.transpose_for_scores(
                    self.pos_query_proj(rel_embeddings), self.num_attention_heads
                ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)  # .split(self.all_head_size, dim=-1)

        score = 0
        # content->position
        if "c2p" in self.pos_att_type:
            scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor)
            c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
            # Shift signed positions into table indices [0, 2 * att_span).
            c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = torch.gather(
                c2p_att,
                dim=-1,
                index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),
            )
            score += c2p_att / scale.to(dtype=c2p_att.dtype)

        # position->content
        if "p2c" in self.pos_att_type:
            scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
            # When query and key lengths differ, rebuild key-to-key relative positions.
            if key_layer.size(-2) != query_layer.size(-2):
                r_pos = build_relative_position(
                    key_layer.size(-2),
                    key_layer.size(-2),
                    bucket_size=self.position_buckets,
                    max_position=self.max_relative_positions,
                    device=query_layer.device,
                )
                r_pos = r_pos.unsqueeze(0)
            else:
                r_pos = relative_pos

            # p2c uses the negated offsets (position of key relative to query).
            p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
            p2c_att = torch.gather(
                p2c_att,
                dim=-1,
                index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),
            ).transpose(-1, -2)
            score += p2c_att / scale.to(dtype=p2c_att.dtype)

        return score
827
+
828
+
829
# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm
class DebertaV2Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        pad_token_id = getattr(config, "pad_token_id", 0)
        # embedding_size may differ from hidden_size; embed_proj bridges the gap below.
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)

        # When position_biased_input is False, absolute positions are NOT added to the
        # input embeddings (the model then relies on relative attention positions).
        self.position_biased_input = getattr(config, "position_biased_input", True)
        if not self.position_biased_input:
            self.position_embeddings = None
        else:
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)

        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)

        if self.embedding_size != config.hidden_size:
            self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
        # Exactly one of input_ids / inputs_embeds is expected (validated by the caller).
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.position_embeddings is not None:
            position_embeddings = self.position_embeddings(position_ids.long())
        else:
            position_embeddings = torch.zeros_like(inputs_embeds)

        embeddings = inputs_embeds
        if self.position_biased_input:
            embeddings += position_embeddings
        if self.config.type_vocab_size > 0:
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings

        if self.embedding_size != self.config.hidden_size:
            embeddings = self.embed_proj(embeddings)

        embeddings = self.LayerNorm(embeddings)

        # Zero out embeddings at masked (padding) positions after LayerNorm.
        if mask is not None:
            if mask.dim() != embeddings.dim():
                if mask.dim() == 4:
                    # Collapse an extended (B, 1, 1, N) attention mask back to (B, N).
                    mask = mask.squeeze(1).squeeze(1)
                mask = mask.unsqueeze(2)
            mask = mask.to(embeddings.dtype)

            embeddings = embeddings * mask

        embeddings = self.dropout(embeddings)
        return embeddings
904
+
905
+
906
# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2
class DebertaV2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DebertaV2Config
    base_model_prefix = "deberta"
    # Some checkpoints carry absolute position embeddings; drop them silently on load.
    _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding row at zero so pad tokens contribute nothing.
                module.weight.data[module.padding_idx].zero_()
930
+
931
+
932
+ DEBERTA_START_DOCSTRING = r"""
933
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
934
+ Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build
935
+ on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
936
+ improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.
937
+
938
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
939
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
940
+ and behavior.
941
+
942
+
943
+ Parameters:
944
+ config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
945
+ Initializing with a config file does not load the weights associated with the model, only the
946
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
947
+ """
948
+
949
+ DEBERTA_INPUTS_DOCSTRING = r"""
950
+ Args:
951
+ input_ids (`torch.LongTensor` of shape `({0})`):
952
+ Indices of input sequence tokens in the vocabulary.
953
+
954
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
955
+ [`PreTrainedTokenizer.__call__`] for details.
956
+
957
+ [What are input IDs?](../glossary#input-ids)
958
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
959
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
960
+
961
+ - 1 for tokens that are **not masked**,
962
+ - 0 for tokens that are **masked**.
963
+
964
+ [What are attention masks?](../glossary#attention-mask)
965
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
966
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
967
+ 1]`:
968
+
969
+ - 0 corresponds to a *sentence A* token,
970
+ - 1 corresponds to a *sentence B* token.
971
+
972
+ [What are token type IDs?](../glossary#token-type-ids)
973
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
974
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
975
+ config.max_position_embeddings - 1]`.
976
+
977
+ [What are position IDs?](../glossary#position-ids)
978
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
979
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
980
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
981
+ model's internal embedding lookup matrix.
982
+ output_attentions (`bool`, *optional*):
983
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
984
+ tensors for more detail.
985
+ output_hidden_states (`bool`, *optional*):
986
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
987
+ more detail.
988
+ return_dict (`bool`, *optional*):
989
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
990
+ """
991
+
992
+
993
@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2
class DebertaV2Model(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = DebertaV2Embeddings(config)
        self.encoder = DebertaV2Encoder(config)
        # Extra decoding steps reusing the last encoder layer; hard-coded to 0 here,
        # so the z_steps branch in forward() is inert in this implementation.
        self.z_steps = 0
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Word-embedding table lives on the embeddings submodule.
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError("The prune function is not implemented in DeBERTa model.")

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            mask=attention_mask,
            inputs_embeds=inputs_embeds,
        )

        # Hidden states are always requested from the encoder so the z_steps branch
        # below could read the penultimate layer, regardless of output_hidden_states.
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        # Index 1 is the per-layer hidden-state collection in both tuple and
        # BaseModelOutput forms.
        encoded_layers = encoder_outputs[1]

        if self.z_steps > 1:
            # NOTE(review): z_steps is fixed to 0 in __init__, so this branch never
            # runs here; if it did, encoded_layers may be a tuple and the .append
            # below would fail — confirm before making z_steps configurable.
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            for layer in layers[1:]:
                query_states = layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=False,
                    query_states=query_states,
                    relative_pos=rel_pos,
                    rel_embeddings=rel_embeddings,
                )
                encoded_layers.append(query_states)

        sequence_output = encoded_layers[-1]

        if not return_dict:
            # Skip the hidden-states slot when the caller did not ask for it.
            return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
            attentions=encoder_outputs.attentions,
        )
1107
+
1108
+
1109
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
    # Decoder weight/bias are tied to the input embeddings and must not be saved twice.
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.deberta = DebertaV2Model(config)
        self.cls = DebertaV2OnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        mask="[MASK]",
    )
    # Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM.forward with Deberta->DebertaV2
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project the final hidden states to vocabulary logits.
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1186
+
1187
+
1188
# Copied from transformers.models.deberta.modeling_deberta.DebertaPredictionHeadTransform with Deberta->DebertaV2
class DebertaV2PredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the MLM decoder."""

    def __init__(self, config):
        super().__init__()
        # embedding_size may differ from hidden_size; the dense layer maps into it.
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.dense = nn.Linear(config.hidden_size, self.embedding_size)
        # config.hidden_act may be an activation name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        projected = self.dense(hidden_states)
        activated = self.transform_act_fn(projected)
        return self.LayerNorm(activated)
1206
+
1207
+
1208
# Copied from transformers.models.deberta.modeling_deberta.DebertaLMPredictionHead with Deberta->DebertaV2
class DebertaV2LMPredictionHead(nn.Module):
    """MLM output head: hidden-state transform followed by a vocab-size decoder."""

    def __init__(self, config):
        super().__init__()
        self.transform = DebertaV2PredictionHeadTransform(config)

        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized
        # with `resize_token_embeddings`.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        transformed = self.transform(hidden_states)
        return self.decoder(transformed)
1228
+
1229
+
1230
# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
class DebertaV2OnlyMLMHead(nn.Module):
    """Thin wrapper exposing the MLM prediction head as `predictions`."""

    def __init__(self, config):
        super().__init__()
        self.predictions = DebertaV2LMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
1239
+
1240
+
1241
@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        # Fall back to binary classification when the config does not define `num_labels`.
        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels

        self.deberta = DebertaV2Model(config)
        # Pools the first token's hidden state into a fixed-size sentence representation.
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim

        self.classifier = nn.Linear(output_dim, num_labels)
        # `cls_dropout` overrides the model-wide hidden dropout for the classifier head only.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Delegate to the backbone so embedding resizing/tying utilities work on this head model.
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    # Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification.forward with Deberta->DebertaV2
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                # Legacy DeBERTa behavior: infer the task from the label/logit shapes.
                if self.num_labels == 1:
                    # regression task
                    loss_fn = nn.MSELoss()
                    logits = logits.view(-1).to(labels.dtype)
                    loss = loss_fn(logits, labels.view(-1))
                elif labels.dim() == 1 or labels.size(-1) == 1:
                    # Single-label classification; rows with label < 0 are treated as
                    # padding and excluded from the loss.
                    label_index = (labels >= 0).nonzero()
                    labels = labels.long()
                    if label_index.size(0) > 0:
                        labeled_logits = torch.gather(
                            logits, 0, label_index.expand(label_index.size(0), logits.size(1))
                        )
                        labels = torch.gather(labels, 0, label_index.view(-1))
                        loss_fct = CrossEntropyLoss()
                        loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
                    else:
                        # No valid labels in the batch: emit a zero loss on the same
                        # device/dtype as `logits` so backward still works.
                        loss = torch.tensor(0).to(logits)
                else:
                    # Soft labels: cross-entropy against a full label distribution.
                    log_softmax = nn.LogSoftmax(-1)
                    loss = -((log_softmax(logits) * labels).sum(-1)).mean()
            elif self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # Tuple output: prepend the loss (when computed) to (logits, *encoder extras).
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
1358
+
1359
+
1360
@add_start_docstrings(
    """
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2
class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaV2Model(config)
        # Plain dropout here (not StableDropout): matches the upstream Deberta head.
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Per-token classifier applied to every position of the sequence output.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Flatten (batch, seq) -> (batch*seq,) so every token is a CE sample;
            # CrossEntropyLoss's default ignore_index (-100) skips padded labels.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
1432
+
1433
+
1434
@add_start_docstrings(
    """
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.deberta = DebertaV2Model(config)
        # Projects each token's hidden state to 2 logits: span start and span end.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        qa_target_start_index=_QA_TARGET_START_INDEX,
        qa_target_end_index=_QA_TARGET_END_INDEX,
    )
    # Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering.forward with Deberta->DebertaV2
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Split the 2-channel projection into separate start/end logit tensors.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            # Clamped out-of-range positions equal `ignored_index` and are skipped by CE.
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average of the start and end losses, as in the original BERT QA head.
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1532
+
1533
+
1534
@add_start_docstrings(
    """
    DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForMultipleChoice(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        # Fall back to 2 labels when the config does not define `num_labels`.
        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels

        self.deberta = DebertaV2Model(config)
        # Pools the first token's hidden state into a fixed-size representation.
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim

        # One score per choice; choices compete via a softmax over the choice axis.
        self.classifier = nn.Linear(output_dim, 1)
        # `cls_dropout` overrides the model-wide hidden dropout for the classifier head only.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)

        # Initialize weights and apply final processing. `post_init()` replaces the
        # deprecated `init_weights()` call and matches the other DebertaV2 head models
        # in this file (e.g. DebertaV2ForSequenceClassification).
        self.post_init()

    def get_input_embeddings(self):
        # Delegate to the backbone so embedding resizing/tying utilities work on this head model.
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Fold the choice axis into the batch axis:
        # (batch, num_choices, seq, ...) -> (batch * num_choices, seq, ...)
        # so the backbone processes every choice as an independent sequence.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.deberta(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Unfold back to (batch, num_choices) so CE picks the best choice per example.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_tf_deberta_v2.py ADDED
@@ -0,0 +1,1875 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 DeBERTa-v2 model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ from typing import Dict, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import tensorflow as tf
23
+
24
+ from ...activations_tf import get_tf_activation
25
+ from ...modeling_tf_outputs import (
26
+ TFBaseModelOutput,
27
+ TFMaskedLMOutput,
28
+ TFMultipleChoiceModelOutput,
29
+ TFQuestionAnsweringModelOutput,
30
+ TFSequenceClassifierOutput,
31
+ TFTokenClassifierOutput,
32
+ )
33
+ from ...modeling_tf_utils import (
34
+ TFMaskedLanguageModelingLoss,
35
+ TFModelInputType,
36
+ TFMultipleChoiceLoss,
37
+ TFPreTrainedModel,
38
+ TFQuestionAnsweringLoss,
39
+ TFSequenceClassificationLoss,
40
+ TFTokenClassificationLoss,
41
+ get_initializer,
42
+ unpack_inputs,
43
+ )
44
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
45
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
46
+ from .configuration_deberta_v2 import DebertaV2Config
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CONFIG_FOR_DOC = "DebertaV2Config"
52
+ _CHECKPOINT_FOR_DOC = "kamalkraj/deberta-v2-xlarge"
53
+
54
+ TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
55
+ "kamalkraj/deberta-v2-xlarge",
56
+ # See all DeBERTa models at https://huggingface.co/models?filter=deberta-v2
57
+ ]
58
+
59
+
60
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaContextPooler with Deberta->DebertaV2
class TFDebertaV2ContextPooler(tf.keras.layers.Layer):
    """Pools the encoder output by projecting the first token's hidden state."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(config.pooler_hidden_size, name="dense")
        self.dropout = TFDebertaV2StableDropout(config.pooler_dropout, name="dropout")
        self.config = config

    def call(self, hidden_states, training: bool = False):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token, training=training)
        pooled_output = self.dense(context_token)
        pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
        return pooled_output

    @property
    def output_dim(self) -> int:
        # The pooled representation keeps the encoder's hidden size.
        return self.config.hidden_size

    def build(self, input_shape=None):
        # Build sublayers under explicit name scopes so weight names stay
        # stable for checkpoint loading.
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.pooler_hidden_size])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
91
+
92
+
93
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaXSoftmax with Deberta->DebertaV2
class TFDebertaV2XSoftmax(tf.keras.layers.Layer):
    """
    Masked Softmax which is optimized for saving memory

    Args:
        input (`tf.Tensor`): The input tensor that will apply softmax.
        mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
        dim (int): The dimension that will apply softmax
    """

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis

    def call(self, inputs: tf.Tensor, mask: tf.Tensor):
        # Invert the mask: True marks positions to exclude from the softmax.
        rmask = tf.logical_not(tf.cast(mask, tf.bool))
        # -inf logits contribute zero probability mass after the softmax.
        output = tf.where(rmask, float("-inf"), inputs)
        output = stable_softmax(output, self.axis)
        # Re-zero masked positions so no -inf/NaN residue leaks downstream.
        output = tf.where(rmask, 0.0, output)
        return output
114
+
115
+
116
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaStableDropout with Deberta->DebertaV2
class TFDebertaV2StableDropout(tf.keras.layers.Layer):
    """
    Optimized dropout module for stabilizing the training

    Args:
        drop_prob (float): the dropout probabilities
    """

    def __init__(self, drop_prob, **kwargs):
        super().__init__(**kwargs)
        self.drop_prob = drop_prob

    @tf.custom_gradient
    def xdropout(self, inputs):
        """
        Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob.
        """
        # `mask` is True where an element is dropped (Bernoulli keep prob = 1 - drop_prob).
        mask = tf.cast(
            1
            - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
            tf.bool,
        )
        # Inverted-dropout scaling keeps the expected activation magnitude unchanged.
        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)
        if self.drop_prob > 0:
            inputs = tf.where(mask, 0.0, inputs) * scale

        def grad(upstream):
            # The custom gradient reuses the same mask/scale so gradients flow
            # only through the kept elements.
            if self.drop_prob > 0:
                return tf.where(mask, 0.0, upstream) * scale
            else:
                return upstream

        return inputs, grad

    def call(self, inputs: tf.Tensor, training: tf.Tensor = False):
        # Dropout is a no-op at inference time.
        if training:
            return self.xdropout(inputs)
        return inputs
155
+
156
+
157
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaSelfOutput with Deberta->DebertaV2
class TFDebertaV2SelfOutput(tf.keras.layers.Layer):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(config.hidden_size, name="dense")
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
        self.config = config

    def call(self, hidden_states, input_tensor, training: bool = False):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        # Residual connection with `input_tensor` followed by LayerNorm.
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

    def build(self, input_shape=None):
        # Explicit name scopes keep weight names stable for checkpoint loading.
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
185
+
186
+
187
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaAttention with Deberta->DebertaV2
class TFDebertaV2Attention(tf.keras.layers.Layer):
    """Disentangled self-attention followed by the self-output projection block."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.self = TFDebertaV2DisentangledSelfAttention(config, name="self")
        self.dense_output = TFDebertaV2SelfOutput(config, name="output")
        self.config = config

    def call(
        self,
        input_tensor: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        self_outputs = self.self(
            hidden_states=input_tensor,
            attention_mask=attention_mask,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
            output_attentions=output_attentions,
            training=training,
        )
        # When no separate query states are given, the residual for the output
        # block is the attention input itself.
        if query_states is None:
            query_states = input_tensor
        attention_output = self.dense_output(
            hidden_states=self_outputs[0], input_tensor=query_states, training=training
        )

        # Append attention probabilities when `output_attentions` was requested.
        output = (attention_output,) + self_outputs[1:]

        return output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self", None) is not None:
            with tf.name_scope(self.self.name):
                self.self.build(None)
        if getattr(self, "dense_output", None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)
234
+
235
+
236
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaIntermediate with Deberta->DebertaV2
class TFDebertaV2Intermediate(tf.keras.layers.Layer):
    """Feed-forward expansion: hidden_size -> intermediate_size with activation."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )

        # `hidden_act` may be a string name or a callable; resolve strings via the
        # shared activation registry.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
264
+
265
+
266
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOutput with Deberta->DebertaV2
class TFDebertaV2Output(tf.keras.layers.Layer):
    """Feed-forward contraction: intermediate_size -> hidden_size, plus residual + LayerNorm."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
        self.config = config

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        # Residual connection with the attention output, then LayerNorm.
        hidden_states = self.LayerNorm(hidden_states + input_tensor)

        return hidden_states

    def build(self, input_shape=None):
        # Explicit name scopes keep weight names stable for checkpoint loading.
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.intermediate_size])
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
298
+
299
+
300
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLayer with Deberta->DebertaV2
class TFDebertaV2Layer(tf.keras.layers.Layer):
    """One full transformer block: attention -> intermediate FFN -> output projection."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)

        self.attention = TFDebertaV2Attention(config, name="attention")
        self.intermediate = TFDebertaV2Intermediate(config, name="intermediate")
        self.bert_output = TFDebertaV2Output(config, name="output")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        attention_outputs = self.attention(
            input_tensor=hidden_states,
            attention_mask=attention_mask,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
            output_attentions=output_attentions,
            training=training,
        )
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(hidden_states=attention_output)
        layer_output = self.bert_output(
            hidden_states=intermediate_output, input_tensor=attention_output, training=training
        )
        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "attention", None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, "intermediate", None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, "bert_output", None) is not None:
            with tf.name_scope(self.bert_output.name):
                self.bert_output.build(None)
350
+
351
+
352
class TFDebertaV2ConvLayer(tf.keras.layers.Layer):
    """Convolution branch applied after the first encoder layer (DeBERTa-v2 only).

    Runs a 1D convolution over the sequence (implemented as a conv2d with a dummy
    height dimension), masks out padded positions, and merges the activated result
    into the residual stream through a LayerNorm.
    """

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)

        self.kernel_size = getattr(config, "conv_kernel_size", 3)
        # groups = getattr(config, "conv_groups", 1)
        self.conv_act = get_tf_activation(getattr(config, "conv_act", "tanh"))
        # Symmetric padding gives "same"-length output for odd kernel sizes.
        self.padding = (self.kernel_size - 1) // 2
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
        self.config = config

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # The conv weights are created manually (rather than via a Conv1D layer) to
        # control the variable names ("conv/kernel", "conv/bias") for checkpoints.
        with tf.name_scope("conv"):
            self.conv_kernel = self.add_weight(
                name="kernel",
                shape=[self.kernel_size, self.config.hidden_size, self.config.hidden_size],
                initializer=get_initializer(self.config.initializer_range),
            )
            self.conv_bias = self.add_weight(
                name="bias", shape=[self.config.hidden_size], initializer=tf.zeros_initializer()
            )
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

    def call(
        self, hidden_states: tf.Tensor, residual_states: tf.Tensor, input_mask: tf.Tensor, training: bool = False
    ) -> tf.Tensor:
        # 1D conv over the sequence axis, expressed as conv2d on a [B, 1, S, H] view.
        out = tf.nn.conv2d(
            tf.expand_dims(hidden_states, 1),
            tf.expand_dims(self.conv_kernel, 0),
            strides=1,
            padding=[[0, 0], [0, 0], [self.padding, self.padding], [0, 0]],
        )
        out = tf.squeeze(tf.nn.bias_add(out, self.conv_bias), 1)
        # rmask is True at padded positions (input_mask == 0); zero them out.
        rmask = tf.cast(1 - input_mask, tf.bool)
        out = tf.where(tf.broadcast_to(tf.expand_dims(rmask, -1), shape_list(out)), 0.0, out)
        out = self.dropout(out, training=training)
        out = self.conv_act(out)

        layer_norm_input = residual_states + out
        output = self.LayerNorm(layer_norm_input)

        if input_mask is None:
            output_states = output
        else:
            # Align the mask's rank with the hidden states before multiplying:
            # a 4D extended mask is squeezed back to [B, S], then expanded to [B, S, 1].
            if len(shape_list(input_mask)) != len(shape_list(layer_norm_input)):
                if len(shape_list(input_mask)) == 4:
                    input_mask = tf.squeeze(tf.squeeze(input_mask, axis=1), axis=1)
                input_mask = tf.cast(tf.expand_dims(input_mask, axis=2), tf.float32)

            output_states = output * input_mask

        return output_states
413
+
414
+
415
class TFDebertaV2Encoder(tf.keras.layers.Layer):
    """Stack of `TFDebertaV2Layer` blocks sharing one relative-position embedding
    table, with an optional LayerNorm on that table and an optional convolution
    branch applied after the first layer."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)

        self.layer = [TFDebertaV2Layer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
        self.relative_attention = getattr(config, "relative_attention", False)
        self.config = config
        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings

            self.position_buckets = getattr(config, "position_buckets", -1)
            # Embedding table covers both negative and positive offsets, hence * 2.
            self.pos_ebd_size = self.max_relative_positions * 2

            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets * 2

        # norm_rel_ebd is a "|"-separated list of options; "layer_norm" enables a
        # LayerNorm over the relative embeddings.
        self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]

        if "layer_norm" in self.norm_rel_ebd:
            self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")

        self.conv = TFDebertaV2ConvLayer(config, name="conv") if getattr(config, "conv_kernel_size", 0) > 0 else None

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if self.relative_attention:
            # Weight name "rel_embeddings.weight" mirrors the PyTorch parameter name
            # for checkpoint cross-loading.
            self.rel_embeddings = self.add_weight(
                name="rel_embeddings.weight",
                shape=[self.pos_ebd_size, self.config.hidden_size],
                initializer=get_initializer(self.config.initializer_range),
            )
        if getattr(self, "conv", None) is not None:
            with tf.name_scope(self.conv.name):
                self.conv.build(None)
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, self.config.hidden_size])
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)

    def get_rel_embedding(self):
        # Returns the (optionally normalized) relative-position embedding table,
        # or None when relative attention is disabled.
        rel_embeddings = self.rel_embeddings if self.relative_attention else None
        if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
            rel_embeddings = self.LayerNorm(rel_embeddings)
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        # Expand a 2D [B, S] mask into a 4D [B, 1, S, S] pairwise mask where
        # [i, j] == 1 iff both token i and token j are attendable; a 3D mask just
        # gains a head dimension.
        if len(shape_list(attention_mask)) <= 2:
            extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
            attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
            attention_mask = tf.cast(attention_mask, tf.uint8)
        elif len(shape_list(attention_mask)) == 3:
            attention_mask = tf.expand_dims(attention_mask, 1)

        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        # Lazily build relative positions from the query/key lengths when the
        # caller did not supply them.
        if self.relative_attention and relative_pos is None:
            q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
            relative_pos = build_relative_position(
                q,
                shape_list(hidden_states)[-2],
                bucket_size=self.position_buckets,
                max_position=self.max_relative_positions,
            )
        return relative_pos

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # Keep a per-token [B, S] mask for the conv branch; recover it from a
        # higher-rank mask by checking whether any key position is attendable.
        if len(shape_list(attention_mask)) <= 2:
            input_mask = attention_mask
        else:
            input_mask = tf.cast(tf.math.reduce_sum(attention_mask, axis=-2) > 0, dtype=tf.uint8)

        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)

        next_kv = hidden_states

        rel_embeddings = self.get_rel_embedding()
        output_states = next_kv
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (output_states,)

            layer_outputs = layer_module(
                hidden_states=next_kv,
                attention_mask=attention_mask,
                query_states=query_states,
                relative_pos=relative_pos,
                rel_embeddings=rel_embeddings,
                output_attentions=output_attentions,
                training=training,
            )
            output_states = layer_outputs[0]

            # The conv branch only feeds into the output of the *first* layer,
            # combining the raw embeddings with that layer's output.
            if i == 0 and self.conv is not None:
                output_states = self.conv(hidden_states, output_states, input_mask)

            next_kv = output_states

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (output_states,)

        if not return_dict:
            return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutput(
            last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
547
+
548
+
549
def make_log_bucket_position(relative_pos, bucket_size, max_position):
    """Map signed relative positions to log-spaced bucket indices.

    Offsets whose magnitude is at most `bucket_size // 2` keep their exact value;
    larger offsets are compressed logarithmically up to `max_position`, preserving
    the sign of the original offset.
    """
    sign = tf.math.sign(relative_pos)
    mid = bucket_size // 2
    # Inside (-mid, mid) the placeholder (mid - 1) avoids log(0); the final
    # tf.where below restores the exact relative position for that range.
    abs_pos = tf.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, tf.math.abs(relative_pos))
    log_pos = (
        tf.math.ceil(
            tf.cast(tf.math.log(abs_pos / mid), tf.float32) / tf.math.log((max_position - 1) / mid) * (mid - 1)
        )
        + mid
    )
    bucket_pos = tf.cast(
        tf.where(abs_pos <= mid, tf.cast(relative_pos, tf.float32), log_pos * tf.cast(sign, tf.float32)), tf.int32
    )
    return bucket_pos
563
+
564
+
565
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
    """
    Build relative position according to the query and key

    We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
    \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -
    P_k\\)

    Args:
        query_size (int): the length of query
        key_size (int): the length of key
        bucket_size (int): the size of position bucket
        max_position (int): the maximum allowed absolute position

    Return:
        `tf.Tensor`: A tensor with shape [1, query_size, key_size]

    """
    q_ids = tf.range(query_size, dtype=tf.int32)
    k_ids = tf.range(key_size, dtype=tf.int32)
    # Pairwise differences: rel_pos_ids[i, j] = i - j.
    rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])
    # Only bucketize when both bucket_size and max_position are configured (> 0).
    if bucket_size > 0 and max_position > 0:
        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
    rel_pos_ids = rel_pos_ids[:query_size, :]
    rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
    return tf.cast(rel_pos_ids, tf.int64)
591
+
592
+
593
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    """Broadcast `c2p_pos` to the first three dims of `query_layer` plus the
    last dim of `relative_pos`."""
    target_shape = shape_list(query_layer)[:3] + [shape_list(relative_pos)[-1]]
    return tf.broadcast_to(c2p_pos, target_shape)
601
+
602
+
603
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    """Broadcast `c2p_pos` to the first two dims of `query_layer` with the key
    length repeated for the trailing two dims."""
    key_len = shape_list(key_layer)[-2]
    target_shape = shape_list(query_layer)[:2] + [key_len, key_len]
    return tf.broadcast_to(c2p_pos, target_shape)
611
+
612
+
613
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    """Broadcast `pos_index` to the leading two dims of `p2c_att` plus the
    penultimate dim of `pos_index` and the key length."""
    target_shape = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
    return tf.broadcast_to(pos_index, target_shape)
616
+
617
+
618
def take_along_axis(x, indices):
    """Gather values from the last axis of `x` at per-row `indices`.

    Only a valid port of np.take_along_axis when the gather axis is -1. Uses a
    one-hot matmul on TPUs (where gather+reshape is problematic) and a plain
    batched gather elsewhere.
    """
    # Only a valid port of np.take_along_axis when the gather axis is -1

    # TPU + gathers and reshapes don't go along well -- see https://github.com/huggingface/transformers/issues/18239
    if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
        # [B, S, P] -> [B, S, P, D]
        one_hot_indices = tf.one_hot(indices, depth=x.shape[-1], dtype=x.dtype)

        # if we ignore the first two dims, this is equivalent to multiplying a matrix (one hot) by a vector (x)
        # grossly abusing notation: [B, S, P, D] . [B, S, D] = [B, S, P]
        gathered = tf.einsum("ijkl,ijl->ijk", one_hot_indices, x)

    # GPUs, on the other hand, prefer gathers instead of large one-hot+matmuls
    else:
        gathered = tf.gather(x, indices, batch_dims=2)

    return gathered
635
+
636
+
637
class TFDebertaV2DisentangledSelfAttention(tf.keras.layers.Layer):
    """
    Disentangled self-attention module

    Parameters:
        config (`DebertaV2Config`):
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*, for more details, please refer [`DebertaV2Config`]

    """

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        _attention_head_size = config.hidden_size // config.num_attention_heads
        # The head size can be overridden by the config; defaults to hidden/heads.
        self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query_proj = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="query_proj",
            use_bias=True,
        )
        self.key_proj = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key_proj",
            use_bias=True,
        )
        self.value_proj = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="value_proj",
            use_bias=True,
        )

        # share_att_key: reuse the content query/key projections for the
        # positional embeddings instead of dedicated pos_* projections.
        self.share_att_key = getattr(config, "share_att_key", False)
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, "relative_attention", False)

        if self.relative_attention:
            self.position_buckets = getattr(config, "position_buckets", -1)
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_ebd_size = self.max_relative_positions
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets

            self.pos_dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="pos_dropout")

            if not self.share_att_key:
                # Layer names "pos_proj"/"pos_q_proj" intentionally differ from the
                # attribute names — they must match checkpoint variable names.
                if "c2p" in self.pos_att_type:
                    self.pos_key_proj = tf.keras.layers.Dense(
                        self.all_head_size,
                        kernel_initializer=get_initializer(config.initializer_range),
                        name="pos_proj",
                        use_bias=True,
                    )
                if "p2c" in self.pos_att_type:
                    self.pos_query_proj = tf.keras.layers.Dense(
                        self.all_head_size,
                        kernel_initializer=get_initializer(config.initializer_range),
                        name="pos_q_proj",
                    )
        self.softmax = TFDebertaV2XSoftmax(axis=-1)
        self.dropout = TFDebertaV2StableDropout(config.attention_probs_dropout_prob, name="dropout")
        self.config = config

    def transpose_for_scores(self, tensor: tf.Tensor, attention_heads: int) -> tf.Tensor:
        # Reshape [B, S, all_head] -> [B*H, S, head_size] (heads folded into batch).
        tensor_shape = shape_list(tensor)
        # In graph mode mode, we can't reshape with -1 as the final dimension if the first dimension (batch size) is None
        shape = tensor_shape[:-1] + [attention_heads, tensor_shape[-1] // attention_heads]
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
        tensor = tf.reshape(tensor=tensor, shape=shape)
        tensor = tf.transpose(tensor, perm=[0, 2, 1, 3])
        x_shape = shape_list(tensor)
        tensor = tf.reshape(tensor, shape=[-1, x_shape[-2], x_shape[-1]])
        return tensor

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """
        Call the module

        Args:
            hidden_states (`tf.Tensor`):
                Input states to the module usually the output from previous layer, it will be the Q,K and V in
                *Attention(Q,K,V)*

            attention_mask (`tf.Tensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
                sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
                th token.

            return_att (`bool`, optional):
                Whether return the attention matrix.

            query_states (`tf.Tensor`, optional):
                The *Q* state in *Attention(Q,K,V)*.

            relative_pos (`tf.Tensor`):
                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
                values ranging in [*-max_relative_positions*, *max_relative_positions*].

            rel_embeddings (`tf.Tensor`):
                The embedding of relative distances. It's a tensor of shape [\\(2 \\times
                \\text{max_relative_positions}\\), *hidden_size*].


        """
        if query_states is None:
            query_states = hidden_states
        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)

        rel_att = None
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # scale_factor counts how many score components are summed (content + c2p + p2c)
        # so the softmax temperature stays comparable.
        scale_factor = 1
        if "c2p" in self.pos_att_type:
            scale_factor += 1
        if "p2c" in self.pos_att_type:
            scale_factor += 1
        scale = tf.math.sqrt(tf.cast(shape_list(query_layer)[-1] * scale_factor, tf.float32))
        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 2, 1]) / scale)
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)

        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        attention_scores = tf.reshape(
            attention_scores,
            (-1, self.num_attention_heads, shape_list(attention_scores)[-2], shape_list(attention_scores)[-1]),
        )

        # bsz x height x length x dimension
        attention_probs = self.softmax(attention_scores, attention_mask)
        attention_probs = self.dropout(attention_probs, training=training)
        context_layer = tf.matmul(
            tf.reshape(attention_probs, [-1, shape_list(attention_probs)[-2], shape_list(attention_probs)[-1]]),
            value_layer,
        )
        context_layer = tf.transpose(
            tf.reshape(
                context_layer,
                [-1, self.num_attention_heads, shape_list(context_layer)[-2], shape_list(context_layer)[-1]],
            ),
            [0, 2, 1, 3],
        )
        # Set the final dimension here explicitly.
        # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
        # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput
        # requires final input dimension to be defined
        context_layer_shape = shape_list(context_layer)
        new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
        context_layer = tf.reshape(context_layer, new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs

    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        # Computes the content->position (c2p) and position->content (p2c) bias
        # terms that get added to the content-content attention scores.
        if relative_pos is None:
            q = shape_list(query_layer)[-2]
            relative_pos = build_relative_position(
                q,
                shape_list(key_layer)[-2],
                bucket_size=self.position_buckets,
                max_position=self.max_relative_positions,
            )
        shape_list_pos = shape_list(relative_pos)
        # Normalize relative_pos to rank 4: [batch, head, query, key].
        if len(shape_list_pos) == 2:
            relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
        elif len(shape_list_pos) == 3:
            relative_pos = tf.expand_dims(relative_pos, 1)
        # bsz x height x query x key
        elif len(shape_list_pos) != 4:
            raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")

        att_span = self.pos_ebd_size
        rel_embeddings = tf.expand_dims(
            rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :], 0
        )
        if self.share_att_key:
            # Reuse the content projections for positional embeddings, tiled across
            # the batch (query_layer's leading dim is batch*heads).
            pos_query_layer = tf.tile(
                self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads),
                [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
            )
            pos_key_layer = tf.tile(
                self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads),
                [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
            )
        else:
            if "c2p" in self.pos_att_type:
                pos_key_layer = tf.tile(
                    self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads),
                    [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
                )  # .split(self.all_head_size, dim=-1)
            if "p2c" in self.pos_att_type:
                pos_query_layer = tf.tile(
                    self.transpose_for_scores(self.pos_query_proj(rel_embeddings), self.num_attention_heads),
                    [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
                )  # .split(self.all_head_size, dim=-1)

        score = 0
        # content->position
        if "c2p" in self.pos_att_type:
            scale = tf.math.sqrt(tf.cast(shape_list(pos_key_layer)[-1] * scale_factor, tf.float32))
            c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 2, 1]))
            # Shift signed relative positions into valid index range [0, 2*att_span).
            c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = take_along_axis(
                c2p_att,
                tf.broadcast_to(
                    tf.squeeze(c2p_pos, 0),
                    [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(relative_pos)[-1]],
                ),
            )
            score += c2p_att / scale

        # position->content
        if "p2c" in self.pos_att_type:
            scale = tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, tf.float32))
            # When query and key lengths differ (e.g. enhanced mask decoder), build
            # a key-to-key relative position map instead of reusing relative_pos.
            if shape_list(key_layer)[-2] != shape_list(query_layer)[-2]:
                r_pos = build_relative_position(
                    shape_list(key_layer)[-2],
                    shape_list(key_layer)[-2],
                    bucket_size=self.position_buckets,
                    max_position=self.max_relative_positions,
                )
                r_pos = tf.expand_dims(r_pos, 0)
            else:
                r_pos = relative_pos

            # p2c uses the negated relative position (key's view of the query).
            p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)

            p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 2, 1]))
            p2c_att = tf.transpose(
                take_along_axis(
                    p2c_att,
                    tf.broadcast_to(
                        tf.squeeze(p2c_pos, 0),
                        [shape_list(query_layer)[0], shape_list(key_layer)[-2], shape_list(key_layer)[-2]],
                    ),
                ),
                [0, 2, 1],
            )
            score += p2c_att / scale

        return score

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # pos_* sub-layers may not exist (depends on relative_attention /
        # share_att_key / pos_att_type), hence the getattr(..., None) guards.
        if getattr(self, "query_proj", None) is not None:
            with tf.name_scope(self.query_proj.name):
                self.query_proj.build([None, None, self.config.hidden_size])
        if getattr(self, "key_proj", None) is not None:
            with tf.name_scope(self.key_proj.name):
                self.key_proj.build([None, None, self.config.hidden_size])
        if getattr(self, "value_proj", None) is not None:
            with tf.name_scope(self.value_proj.name):
                self.value_proj.build([None, None, self.config.hidden_size])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, "pos_dropout", None) is not None:
            with tf.name_scope(self.pos_dropout.name):
                self.pos_dropout.build(None)
        if getattr(self, "pos_key_proj", None) is not None:
            with tf.name_scope(self.pos_key_proj.name):
                self.pos_key_proj.build([None, None, self.config.hidden_size])
        if getattr(self, "pos_query_proj", None) is not None:
            with tf.name_scope(self.pos_query_proj.name):
                self.pos_query_proj.build([None, None, self.config.hidden_size])
925
+
926
+
927
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaEmbeddings Deberta->DebertaV2
928
class TFDebertaV2Embeddings(tf.keras.layers.Layer):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.hidden_size = config.hidden_size
        self.max_position_embeddings = config.max_position_embeddings
        # position_biased_input=False disables additive absolute-position embeddings.
        self.position_biased_input = getattr(config, "position_biased_input", True)
        self.initializer_range = config.initializer_range
        # Project embeddings up to hidden_size only when they differ (v2-xxlarge style).
        if self.embedding_size != config.hidden_size:
            self.embed_proj = tf.keras.layers.Dense(
                config.hidden_size,
                kernel_initializer=get_initializer(config.initializer_range),
                name="embed_proj",
                use_bias=False,
            )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")

    def build(self, input_shape=None):
        # Embedding tables are created manually inside fixed name scopes so their
        # variable names match pretrained checkpoints.
        with tf.name_scope("word_embeddings"):
            self.weight = self.add_weight(
                name="weight",
                shape=[self.config.vocab_size, self.embedding_size],
                initializer=get_initializer(self.initializer_range),
            )

        with tf.name_scope("token_type_embeddings"):
            if self.config.type_vocab_size > 0:
                self.token_type_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.config.type_vocab_size, self.embedding_size],
                    initializer=get_initializer(self.initializer_range),
                )
            else:
                self.token_type_embeddings = None

        with tf.name_scope("position_embeddings"):
            if self.position_biased_input:
                self.position_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.max_position_embeddings, self.hidden_size],
                    initializer=get_initializer(self.initializer_range),
                )
            else:
                self.position_embeddings = None

        if self.built:
            return
        self.built = True
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, "embed_proj", None) is not None:
            with tf.name_scope(self.embed_proj.name):
                self.embed_proj.build([None, None, self.embedding_size])

    def call(
        self,
        input_ids: tf.Tensor = None,
        position_ids: tf.Tensor = None,
        token_type_ids: tf.Tensor = None,
        inputs_embeds: tf.Tensor = None,
        mask: tf.Tensor = None,
        training: bool = False,
    ) -> tf.Tensor:
        """
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("Need to provide either `input_ids` or `input_embeds`.")

        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)

        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)

        # Sum the enabled embedding components; each addition is optional per config.
        final_embeddings = inputs_embeds
        if self.position_biased_input:
            position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
            final_embeddings += position_embeds
        if self.config.type_vocab_size > 0:
            token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
            final_embeddings += token_type_embeds

        if self.embedding_size != self.hidden_size:
            final_embeddings = self.embed_proj(final_embeddings)

        final_embeddings = self.LayerNorm(final_embeddings)

        if mask is not None:
            # Align mask rank with embeddings (4D extended mask -> [B, S] -> [B, S, 1]),
            # then zero out padded positions.
            if len(shape_list(mask)) != len(shape_list(final_embeddings)):
                if len(shape_list(mask)) == 4:
                    mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
                mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)

            final_embeddings = final_embeddings * mask

        final_embeddings = self.dropout(final_embeddings, training=training)

        return final_embeddings
1045
+
1046
+
1047
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPredictionHeadTransform with Deberta->DebertaV2
1048
class TFDebertaV2PredictionHeadTransform(tf.keras.layers.Layer):
    """Dense -> activation -> LayerNorm transform applied to hidden states before
    the (tied) MLM output projection. Maps hidden_size down/up to embedding_size."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)

        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.dense = tf.keras.layers.Dense(
            units=self.embedding_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )

        # hidden_act may be either a string name or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # dense takes hidden_size in; LayerNorm normalizes the embedding_size output.
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.embedding_size])
1084
+
1085
+
1086
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLMPredictionHead with Deberta->DebertaV2
1087
class TFDebertaV2LMPredictionHead(tf.keras.layers.Layer):
    """MLM prediction head: transform + output projection tied to the input word
    embeddings, with a separate per-token output bias."""

    def __init__(self, config: DebertaV2Config, input_embeddings: tf.keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.transform = TFDebertaV2PredictionHeadTransform(config, name="transform")

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings

    def build(self, input_shape=None):
        # Bias is created before the `built` guard (upstream pattern) so it exists
        # for get_bias/set_bias even on re-entrant builds.
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")

        if self.built:
            return
        self.built = True
        if getattr(self, "transform", None) is not None:
            with tf.name_scope(self.transform.name):
                self.transform.build(None)

    def get_output_embeddings(self) -> tf.keras.layers.Layer:
        # The output projection is tied to the input embedding layer.
        return self.input_embeddings

    def set_output_embeddings(self, value: tf.Variable):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self) -> Dict[str, tf.Variable]:
        return {"bias": self.bias}

    def set_bias(self, value: tf.Variable):
        # Resizing the bias also updates the config's vocab size to stay in sync.
        self.bias = value["bias"]
        self.config.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.transform(hidden_states=hidden_states)
        seq_length = shape_list(hidden_states)[1]
        # Flatten to 2D for the matmul against the tied embedding matrix, then
        # restore the [batch, seq, vocab] shape.
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)

        return hidden_states
1133
+
1134
+
1135
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOnlyMLMHead with Deberta->DebertaV2
1136
+ class TFDebertaV2OnlyMLMHead(tf.keras.layers.Layer):
1137
+ def __init__(self, config: DebertaV2Config, input_embeddings: tf.keras.layers.Layer, **kwargs):
1138
+ super().__init__(**kwargs)
1139
+ self.predictions = TFDebertaV2LMPredictionHead(config, input_embeddings, name="predictions")
1140
+
1141
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
1142
+ prediction_scores = self.predictions(hidden_states=sequence_output)
1143
+
1144
+ return prediction_scores
1145
+
1146
+ def build(self, input_shape=None):
1147
+ if self.built:
1148
+ return
1149
+ self.built = True
1150
+ if getattr(self, "predictions", None) is not None:
1151
+ with tf.name_scope(self.predictions.name):
1152
+ self.predictions.build(None)
1153
+
1154
+
1155
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaMainLayer with Deberta->DebertaV2
1156
+ class TFDebertaV2MainLayer(tf.keras.layers.Layer):
1157
+ config_class = DebertaV2Config
1158
+
1159
+ def __init__(self, config: DebertaV2Config, **kwargs):
1160
+ super().__init__(**kwargs)
1161
+
1162
+ self.config = config
1163
+
1164
+ self.embeddings = TFDebertaV2Embeddings(config, name="embeddings")
1165
+ self.encoder = TFDebertaV2Encoder(config, name="encoder")
1166
+
1167
+ def get_input_embeddings(self) -> tf.keras.layers.Layer:
1168
+ return self.embeddings
1169
+
1170
+ def set_input_embeddings(self, value: tf.Variable):
1171
+ self.embeddings.weight = value
1172
+ self.embeddings.vocab_size = shape_list(value)[0]
1173
+
1174
+ def _prune_heads(self, heads_to_prune):
1175
+ """
1176
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1177
+ class PreTrainedModel
1178
+ """
1179
+ raise NotImplementedError
1180
+
1181
+ @unpack_inputs
1182
+ def call(
1183
+ self,
1184
+ input_ids: TFModelInputType | None = None,
1185
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1186
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1187
+ position_ids: np.ndarray | tf.Tensor | None = None,
1188
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1189
+ output_attentions: Optional[bool] = None,
1190
+ output_hidden_states: Optional[bool] = None,
1191
+ return_dict: Optional[bool] = None,
1192
+ training: bool = False,
1193
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1194
+ if input_ids is not None and inputs_embeds is not None:
1195
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1196
+ elif input_ids is not None:
1197
+ input_shape = shape_list(input_ids)
1198
+ elif inputs_embeds is not None:
1199
+ input_shape = shape_list(inputs_embeds)[:-1]
1200
+ else:
1201
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1202
+
1203
+ if attention_mask is None:
1204
+ attention_mask = tf.fill(dims=input_shape, value=1)
1205
+
1206
+ if token_type_ids is None:
1207
+ token_type_ids = tf.fill(dims=input_shape, value=0)
1208
+
1209
+ embedding_output = self.embeddings(
1210
+ input_ids=input_ids,
1211
+ position_ids=position_ids,
1212
+ token_type_ids=token_type_ids,
1213
+ inputs_embeds=inputs_embeds,
1214
+ mask=attention_mask,
1215
+ training=training,
1216
+ )
1217
+
1218
+ encoder_outputs = self.encoder(
1219
+ hidden_states=embedding_output,
1220
+ attention_mask=attention_mask,
1221
+ output_attentions=output_attentions,
1222
+ output_hidden_states=output_hidden_states,
1223
+ return_dict=return_dict,
1224
+ training=training,
1225
+ )
1226
+
1227
+ sequence_output = encoder_outputs[0]
1228
+
1229
+ if not return_dict:
1230
+ return (sequence_output,) + encoder_outputs[1:]
1231
+
1232
+ return TFBaseModelOutput(
1233
+ last_hidden_state=sequence_output,
1234
+ hidden_states=encoder_outputs.hidden_states,
1235
+ attentions=encoder_outputs.attentions,
1236
+ )
1237
+
1238
+ def build(self, input_shape=None):
1239
+ if self.built:
1240
+ return
1241
+ self.built = True
1242
+ if getattr(self, "embeddings", None) is not None:
1243
+ with tf.name_scope(self.embeddings.name):
1244
+ self.embeddings.build(None)
1245
+ if getattr(self, "encoder", None) is not None:
1246
+ with tf.name_scope(self.encoder.name):
1247
+ self.encoder.build(None)
1248
+
1249
+
1250
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPreTrainedModel with Deberta->DebertaV2
1251
+ class TFDebertaV2PreTrainedModel(TFPreTrainedModel):
1252
+ """
1253
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1254
+ models.
1255
+ """
1256
+
1257
+ config_class = DebertaV2Config
1258
+ base_model_prefix = "deberta"
1259
+
1260
+
1261
+ DEBERTA_START_DOCSTRING = r"""
1262
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
1263
+ Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build
1264
+ on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
1265
+ improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.
1266
+
1267
+ This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1268
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1269
+ behavior.
1270
+
1271
+ <Tip>
1272
+
1273
+ TensorFlow models and layers in `transformers` accept two formats as input:
1274
+
1275
+ - having all inputs as keyword arguments (like PyTorch models), or
1276
+ - having all inputs as a list, tuple or dict in the first positional argument.
1277
+
1278
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1279
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1280
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1281
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1282
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1283
+ positional argument:
1284
+
1285
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1286
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1287
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1288
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1289
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1290
+
1291
+ Note that when creating models and layers with
1292
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1293
+ about any of this, as you can just pass inputs like you would to any other Python function!
1294
+
1295
+ </Tip>
1296
+
1297
+ Parameters:
1298
+ config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
1299
+ Initializing with a config file does not load the weights associated with the model, only the
1300
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1301
+ """
1302
+
1303
+ DEBERTA_INPUTS_DOCSTRING = r"""
1304
+ Args:
1305
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
1306
+ Indices of input sequence tokens in the vocabulary.
1307
+
1308
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1309
+ [`PreTrainedTokenizer.__call__`] for details.
1310
+
1311
+ [What are input IDs?](../glossary#input-ids)
1312
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1313
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1314
+
1315
+ - 1 for tokens that are **not masked**,
1316
+ - 0 for tokens that are **masked**.
1317
+
1318
+ [What are attention masks?](../glossary#attention-mask)
1319
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1320
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1321
+ 1]`:
1322
+
1323
+ - 0 corresponds to a *sentence A* token,
1324
+ - 1 corresponds to a *sentence B* token.
1325
+
1326
+ [What are token type IDs?](../glossary#token-type-ids)
1327
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1328
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1329
+ config.max_position_embeddings - 1]`.
1330
+
1331
+ [What are position IDs?](../glossary#position-ids)
1332
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1333
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1334
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
1335
+ model's internal embedding lookup matrix.
1336
+ output_attentions (`bool`, *optional*):
1337
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1338
+ tensors for more detail.
1339
+ output_hidden_states (`bool`, *optional*):
1340
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1341
+ more detail.
1342
+ return_dict (`bool`, *optional*):
1343
+ Whether or not to return a [`~utils.ModelOutput``] instead of a plain tuple.
1344
+ """
1345
+
1346
+
1347
+ @add_start_docstrings(
1348
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
1349
+ DEBERTA_START_DOCSTRING,
1350
+ )
1351
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaModel with Deberta->DebertaV2
1352
+ class TFDebertaV2Model(TFDebertaV2PreTrainedModel):
1353
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
1354
+ super().__init__(config, *inputs, **kwargs)
1355
+
1356
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
1357
+
1358
+ @unpack_inputs
1359
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1360
+ @add_code_sample_docstrings(
1361
+ checkpoint=_CHECKPOINT_FOR_DOC,
1362
+ output_type=TFBaseModelOutput,
1363
+ config_class=_CONFIG_FOR_DOC,
1364
+ )
1365
+ def call(
1366
+ self,
1367
+ input_ids: TFModelInputType | None = None,
1368
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1369
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1370
+ position_ids: np.ndarray | tf.Tensor | None = None,
1371
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1372
+ output_attentions: Optional[bool] = None,
1373
+ output_hidden_states: Optional[bool] = None,
1374
+ return_dict: Optional[bool] = None,
1375
+ training: Optional[bool] = False,
1376
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1377
+ outputs = self.deberta(
1378
+ input_ids=input_ids,
1379
+ attention_mask=attention_mask,
1380
+ token_type_ids=token_type_ids,
1381
+ position_ids=position_ids,
1382
+ inputs_embeds=inputs_embeds,
1383
+ output_attentions=output_attentions,
1384
+ output_hidden_states=output_hidden_states,
1385
+ return_dict=return_dict,
1386
+ training=training,
1387
+ )
1388
+
1389
+ return outputs
1390
+
1391
+ def build(self, input_shape=None):
1392
+ if self.built:
1393
+ return
1394
+ self.built = True
1395
+ if getattr(self, "deberta", None) is not None:
1396
+ with tf.name_scope(self.deberta.name):
1397
+ self.deberta.build(None)
1398
+
1399
+
1400
+ @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
1401
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForMaskedLM with Deberta->DebertaV2
1402
+ class TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelingLoss):
1403
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
1404
+ super().__init__(config, *inputs, **kwargs)
1405
+
1406
+ if config.is_decoder:
1407
+ logger.warning(
1408
+ "If you want to use `TFDebertaV2ForMaskedLM` make sure `config.is_decoder=False` for "
1409
+ "bi-directional self-attention."
1410
+ )
1411
+
1412
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
1413
+ self.mlm = TFDebertaV2OnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")
1414
+
1415
+ def get_lm_head(self) -> tf.keras.layers.Layer:
1416
+ return self.mlm.predictions
1417
+
1418
+ @unpack_inputs
1419
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1420
+ @add_code_sample_docstrings(
1421
+ checkpoint=_CHECKPOINT_FOR_DOC,
1422
+ output_type=TFMaskedLMOutput,
1423
+ config_class=_CONFIG_FOR_DOC,
1424
+ )
1425
+ def call(
1426
+ self,
1427
+ input_ids: TFModelInputType | None = None,
1428
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1429
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1430
+ position_ids: np.ndarray | tf.Tensor | None = None,
1431
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1432
+ output_attentions: Optional[bool] = None,
1433
+ output_hidden_states: Optional[bool] = None,
1434
+ return_dict: Optional[bool] = None,
1435
+ labels: np.ndarray | tf.Tensor | None = None,
1436
+ training: Optional[bool] = False,
1437
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1438
+ r"""
1439
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1440
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1441
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1442
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1443
+ """
1444
+ outputs = self.deberta(
1445
+ input_ids=input_ids,
1446
+ attention_mask=attention_mask,
1447
+ token_type_ids=token_type_ids,
1448
+ position_ids=position_ids,
1449
+ inputs_embeds=inputs_embeds,
1450
+ output_attentions=output_attentions,
1451
+ output_hidden_states=output_hidden_states,
1452
+ return_dict=return_dict,
1453
+ training=training,
1454
+ )
1455
+ sequence_output = outputs[0]
1456
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1457
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1458
+
1459
+ if not return_dict:
1460
+ output = (prediction_scores,) + outputs[2:]
1461
+ return ((loss,) + output) if loss is not None else output
1462
+
1463
+ return TFMaskedLMOutput(
1464
+ loss=loss,
1465
+ logits=prediction_scores,
1466
+ hidden_states=outputs.hidden_states,
1467
+ attentions=outputs.attentions,
1468
+ )
1469
+
1470
+ def build(self, input_shape=None):
1471
+ if self.built:
1472
+ return
1473
+ self.built = True
1474
+ if getattr(self, "deberta", None) is not None:
1475
+ with tf.name_scope(self.deberta.name):
1476
+ self.deberta.build(None)
1477
+ if getattr(self, "mlm", None) is not None:
1478
+ with tf.name_scope(self.mlm.name):
1479
+ self.mlm.build(None)
1480
+
1481
+
1482
+ @add_start_docstrings(
1483
+ """
1484
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1485
+ pooled output) e.g. for GLUE tasks.
1486
+ """,
1487
+ DEBERTA_START_DOCSTRING,
1488
+ )
1489
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForSequenceClassification with Deberta->DebertaV2
1490
+ class TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenceClassificationLoss):
1491
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
1492
+ super().__init__(config, *inputs, **kwargs)
1493
+
1494
+ self.num_labels = config.num_labels
1495
+
1496
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
1497
+ self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
1498
+
1499
+ drop_out = getattr(config, "cls_dropout", None)
1500
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
1501
+ self.dropout = TFDebertaV2StableDropout(drop_out, name="cls_dropout")
1502
+ self.classifier = tf.keras.layers.Dense(
1503
+ units=config.num_labels,
1504
+ kernel_initializer=get_initializer(config.initializer_range),
1505
+ name="classifier",
1506
+ )
1507
+ self.output_dim = self.pooler.output_dim
1508
+
1509
+ @unpack_inputs
1510
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1511
+ @add_code_sample_docstrings(
1512
+ checkpoint=_CHECKPOINT_FOR_DOC,
1513
+ output_type=TFSequenceClassifierOutput,
1514
+ config_class=_CONFIG_FOR_DOC,
1515
+ )
1516
+ def call(
1517
+ self,
1518
+ input_ids: TFModelInputType | None = None,
1519
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1520
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1521
+ position_ids: np.ndarray | tf.Tensor | None = None,
1522
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1523
+ output_attentions: Optional[bool] = None,
1524
+ output_hidden_states: Optional[bool] = None,
1525
+ return_dict: Optional[bool] = None,
1526
+ labels: np.ndarray | tf.Tensor | None = None,
1527
+ training: Optional[bool] = False,
1528
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1529
+ r"""
1530
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1531
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1532
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1533
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1534
+ """
1535
+ outputs = self.deberta(
1536
+ input_ids=input_ids,
1537
+ attention_mask=attention_mask,
1538
+ token_type_ids=token_type_ids,
1539
+ position_ids=position_ids,
1540
+ inputs_embeds=inputs_embeds,
1541
+ output_attentions=output_attentions,
1542
+ output_hidden_states=output_hidden_states,
1543
+ return_dict=return_dict,
1544
+ training=training,
1545
+ )
1546
+ sequence_output = outputs[0]
1547
+ pooled_output = self.pooler(sequence_output, training=training)
1548
+ pooled_output = self.dropout(pooled_output, training=training)
1549
+ logits = self.classifier(pooled_output)
1550
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1551
+
1552
+ if not return_dict:
1553
+ output = (logits,) + outputs[1:]
1554
+
1555
+ return ((loss,) + output) if loss is not None else output
1556
+
1557
+ return TFSequenceClassifierOutput(
1558
+ loss=loss,
1559
+ logits=logits,
1560
+ hidden_states=outputs.hidden_states,
1561
+ attentions=outputs.attentions,
1562
+ )
1563
+
1564
+ def build(self, input_shape=None):
1565
+ if self.built:
1566
+ return
1567
+ self.built = True
1568
+ if getattr(self, "deberta", None) is not None:
1569
+ with tf.name_scope(self.deberta.name):
1570
+ self.deberta.build(None)
1571
+ if getattr(self, "pooler", None) is not None:
1572
+ with tf.name_scope(self.pooler.name):
1573
+ self.pooler.build(None)
1574
+ if getattr(self, "dropout", None) is not None:
1575
+ with tf.name_scope(self.dropout.name):
1576
+ self.dropout.build(None)
1577
+ if getattr(self, "classifier", None) is not None:
1578
+ with tf.name_scope(self.classifier.name):
1579
+ self.classifier.build([None, None, self.output_dim])
1580
+
1581
+
1582
+ @add_start_docstrings(
1583
+ """
1584
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1585
+ Named-Entity-Recognition (NER) tasks.
1586
+ """,
1587
+ DEBERTA_START_DOCSTRING,
1588
+ )
1589
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForTokenClassification with Deberta->DebertaV2
1590
+ class TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClassificationLoss):
1591
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
1592
+ super().__init__(config, *inputs, **kwargs)
1593
+
1594
+ self.num_labels = config.num_labels
1595
+
1596
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
1597
+ self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
1598
+ self.classifier = tf.keras.layers.Dense(
1599
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1600
+ )
1601
+ self.config = config
1602
+
1603
+ @unpack_inputs
1604
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1605
+ @add_code_sample_docstrings(
1606
+ checkpoint=_CHECKPOINT_FOR_DOC,
1607
+ output_type=TFTokenClassifierOutput,
1608
+ config_class=_CONFIG_FOR_DOC,
1609
+ )
1610
+ def call(
1611
+ self,
1612
+ input_ids: TFModelInputType | None = None,
1613
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1614
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1615
+ position_ids: np.ndarray | tf.Tensor | None = None,
1616
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1617
+ output_attentions: Optional[bool] = None,
1618
+ output_hidden_states: Optional[bool] = None,
1619
+ return_dict: Optional[bool] = None,
1620
+ labels: np.ndarray | tf.Tensor | None = None,
1621
+ training: Optional[bool] = False,
1622
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1623
+ r"""
1624
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1625
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1626
+ """
1627
+ outputs = self.deberta(
1628
+ input_ids=input_ids,
1629
+ attention_mask=attention_mask,
1630
+ token_type_ids=token_type_ids,
1631
+ position_ids=position_ids,
1632
+ inputs_embeds=inputs_embeds,
1633
+ output_attentions=output_attentions,
1634
+ output_hidden_states=output_hidden_states,
1635
+ return_dict=return_dict,
1636
+ training=training,
1637
+ )
1638
+ sequence_output = outputs[0]
1639
+ sequence_output = self.dropout(sequence_output, training=training)
1640
+ logits = self.classifier(inputs=sequence_output)
1641
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1642
+
1643
+ if not return_dict:
1644
+ output = (logits,) + outputs[1:]
1645
+ return ((loss,) + output) if loss is not None else output
1646
+
1647
+ return TFTokenClassifierOutput(
1648
+ loss=loss,
1649
+ logits=logits,
1650
+ hidden_states=outputs.hidden_states,
1651
+ attentions=outputs.attentions,
1652
+ )
1653
+
1654
+ def build(self, input_shape=None):
1655
+ if self.built:
1656
+ return
1657
+ self.built = True
1658
+ if getattr(self, "deberta", None) is not None:
1659
+ with tf.name_scope(self.deberta.name):
1660
+ self.deberta.build(None)
1661
+ if getattr(self, "classifier", None) is not None:
1662
+ with tf.name_scope(self.classifier.name):
1663
+ self.classifier.build([None, None, self.config.hidden_size])
1664
+
1665
+
1666
+ @add_start_docstrings(
1667
+ """
1668
+ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1669
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1670
+ """,
1671
+ DEBERTA_START_DOCSTRING,
1672
+ )
1673
+ # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForQuestionAnswering with Deberta->DebertaV2
1674
+ class TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsweringLoss):
1675
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
1676
+ super().__init__(config, *inputs, **kwargs)
1677
+
1678
+ self.num_labels = config.num_labels
1679
+
1680
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
1681
+ self.qa_outputs = tf.keras.layers.Dense(
1682
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1683
+ )
1684
+ self.config = config
1685
+
1686
+ @unpack_inputs
1687
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1688
+ @add_code_sample_docstrings(
1689
+ checkpoint=_CHECKPOINT_FOR_DOC,
1690
+ output_type=TFQuestionAnsweringModelOutput,
1691
+ config_class=_CONFIG_FOR_DOC,
1692
+ )
1693
+ def call(
1694
+ self,
1695
+ input_ids: TFModelInputType | None = None,
1696
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1697
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1698
+ position_ids: np.ndarray | tf.Tensor | None = None,
1699
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1700
+ output_attentions: Optional[bool] = None,
1701
+ output_hidden_states: Optional[bool] = None,
1702
+ return_dict: Optional[bool] = None,
1703
+ start_positions: np.ndarray | tf.Tensor | None = None,
1704
+ end_positions: np.ndarray | tf.Tensor | None = None,
1705
+ training: Optional[bool] = False,
1706
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1707
+ r"""
1708
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1709
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1710
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1711
+ are not taken into account for computing the loss.
1712
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1713
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1714
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1715
+ are not taken into account for computing the loss.
1716
+ """
1717
+ outputs = self.deberta(
1718
+ input_ids=input_ids,
1719
+ attention_mask=attention_mask,
1720
+ token_type_ids=token_type_ids,
1721
+ position_ids=position_ids,
1722
+ inputs_embeds=inputs_embeds,
1723
+ output_attentions=output_attentions,
1724
+ output_hidden_states=output_hidden_states,
1725
+ return_dict=return_dict,
1726
+ training=training,
1727
+ )
1728
+ sequence_output = outputs[0]
1729
+ logits = self.qa_outputs(inputs=sequence_output)
1730
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1731
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1732
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1733
+ loss = None
1734
+
1735
+ if start_positions is not None and end_positions is not None:
1736
+ labels = {"start_position": start_positions}
1737
+ labels["end_position"] = end_positions
1738
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
1739
+
1740
+ if not return_dict:
1741
+ output = (start_logits, end_logits) + outputs[2:]
1742
+ return ((loss,) + output) if loss is not None else output
1743
+
1744
+ return TFQuestionAnsweringModelOutput(
1745
+ loss=loss,
1746
+ start_logits=start_logits,
1747
+ end_logits=end_logits,
1748
+ hidden_states=outputs.hidden_states,
1749
+ attentions=outputs.attentions,
1750
+ )
1751
+
1752
+ def build(self, input_shape=None):
1753
+ if self.built:
1754
+ return
1755
+ self.built = True
1756
+ if getattr(self, "deberta", None) is not None:
1757
+ with tf.name_scope(self.deberta.name):
1758
+ self.deberta.build(None)
1759
+ if getattr(self, "qa_outputs", None) is not None:
1760
+ with tf.name_scope(self.qa_outputs.name):
1761
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1762
+
1763
+
1764
+ @add_start_docstrings(
1765
+ """
1766
+ DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1767
+ softmax) e.g. for RocStories/SWAG tasks.
1768
+ """,
1769
+ DEBERTA_START_DOCSTRING,
1770
+ )
1771
+ class TFDebertaV2ForMultipleChoice(TFDebertaV2PreTrainedModel, TFMultipleChoiceLoss):
1772
+ # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
1773
+ # _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
1774
+ # _keys_to_ignore_on_load_missing = [r"dropout"]
1775
+
1776
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
1777
+ super().__init__(config, *inputs, **kwargs)
1778
+
1779
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
1780
+ self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
1781
+ self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
1782
+ self.classifier = tf.keras.layers.Dense(
1783
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1784
+ )
1785
+ self.output_dim = self.pooler.output_dim
1786
+
1787
+ @unpack_inputs
1788
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1789
+ @add_code_sample_docstrings(
1790
+ checkpoint=_CHECKPOINT_FOR_DOC,
1791
+ output_type=TFMultipleChoiceModelOutput,
1792
+ config_class=_CONFIG_FOR_DOC,
1793
+ )
1794
+ def call(
1795
+ self,
1796
+ input_ids: TFModelInputType | None = None,
1797
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1798
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1799
+ position_ids: np.ndarray | tf.Tensor | None = None,
1800
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1801
+ output_attentions: Optional[bool] = None,
1802
+ output_hidden_states: Optional[bool] = None,
1803
+ return_dict: Optional[bool] = None,
1804
+ labels: np.ndarray | tf.Tensor | None = None,
1805
+ training: Optional[bool] = False,
1806
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1807
+ r"""
1808
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1809
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1810
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1811
+ """
1812
+ if input_ids is not None:
1813
+ num_choices = shape_list(input_ids)[1]
1814
+ seq_length = shape_list(input_ids)[2]
1815
+ else:
1816
+ num_choices = shape_list(inputs_embeds)[1]
1817
+ seq_length = shape_list(inputs_embeds)[2]
1818
+
1819
+ flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
1820
+ flat_attention_mask = (
1821
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
1822
+ )
1823
+ flat_token_type_ids = (
1824
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
1825
+ )
1826
+ flat_position_ids = (
1827
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
1828
+ )
1829
+ flat_inputs_embeds = (
1830
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
1831
+ if inputs_embeds is not None
1832
+ else None
1833
+ )
1834
+ outputs = self.deberta(
1835
+ input_ids=flat_input_ids,
1836
+ attention_mask=flat_attention_mask,
1837
+ token_type_ids=flat_token_type_ids,
1838
+ position_ids=flat_position_ids,
1839
+ inputs_embeds=flat_inputs_embeds,
1840
+ output_attentions=output_attentions,
1841
+ output_hidden_states=output_hidden_states,
1842
+ return_dict=return_dict,
1843
+ training=training,
1844
+ )
1845
+ sequence_output = outputs[0]
1846
+ pooled_output = self.pooler(sequence_output, training=training)
1847
+ pooled_output = self.dropout(pooled_output, training=training)
1848
+ logits = self.classifier(pooled_output)
1849
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
1850
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
1851
+
1852
+ if not return_dict:
1853
+ output = (reshaped_logits,) + outputs[2:]
1854
+ return ((loss,) + output) if loss is not None else output
1855
+
1856
+ return TFMultipleChoiceModelOutput(
1857
+ loss=loss,
1858
+ logits=reshaped_logits,
1859
+ hidden_states=outputs.hidden_states,
1860
+ attentions=outputs.attentions,
1861
+ )
1862
+
1863
+ def build(self, input_shape=None):
1864
+ if self.built:
1865
+ return
1866
+ self.built = True
1867
+ if getattr(self, "deberta", None) is not None:
1868
+ with tf.name_scope(self.deberta.name):
1869
+ self.deberta.build(None)
1870
+ if getattr(self, "pooler", None) is not None:
1871
+ with tf.name_scope(self.pooler.name):
1872
+ self.pooler.build(None)
1873
+ if getattr(self, "classifier", None) is not None:
1874
+ with tf.name_scope(self.classifier.name):
1875
+ self.classifier.build([None, None, self.output_dim])
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2.py ADDED
@@ -0,0 +1,550 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model DeBERTa."""
16
+
17
+ import os
18
+ import unicodedata
19
+ from typing import Any, Dict, List, Optional, Tuple
20
+
21
+ import sentencepiece as sp
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ PRETRAINED_VOCAB_FILES_MAP = {
30
+ "vocab_file": {
31
+ "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model",
32
+ "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model",
33
+ "microsoft/deberta-v2-xlarge-mnli": (
34
+ "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model"
35
+ ),
36
+ "microsoft/deberta-v2-xxlarge-mnli": (
37
+ "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model"
38
+ ),
39
+ }
40
+ }
41
+
42
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
43
+ "microsoft/deberta-v2-xlarge": 512,
44
+ "microsoft/deberta-v2-xxlarge": 512,
45
+ "microsoft/deberta-v2-xlarge-mnli": 512,
46
+ "microsoft/deberta-v2-xxlarge-mnli": 512,
47
+ }
48
+
49
+ PRETRAINED_INIT_CONFIGURATION = {
50
+ "microsoft/deberta-v2-xlarge": {"do_lower_case": False},
51
+ "microsoft/deberta-v2-xxlarge": {"do_lower_case": False},
52
+ "microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False},
53
+ "microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False},
54
+ }
55
+
56
+ VOCAB_FILES_NAMES = {"vocab_file": "spm.model"}
57
+
58
+
59
+ class DebertaV2Tokenizer(PreTrainedTokenizer):
60
+ r"""
61
+ Constructs a DeBERTa-v2 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
62
+
63
+ Args:
64
+ vocab_file (`str`):
65
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
66
+ contains the vocabulary necessary to instantiate a tokenizer.
67
+ do_lower_case (`bool`, *optional*, defaults to `False`):
68
+ Whether or not to lowercase the input when tokenizing.
69
+ bos_token (`string`, *optional*, defaults to `"[CLS]"`):
70
+ The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token.
71
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
72
+ sequence. The token used is the `cls_token`.
73
+ eos_token (`string`, *optional*, defaults to `"[SEP]"`):
74
+ The end of sequence token. When building a sequence using special tokens, this is not the token that is
75
+ used for the end of sequence. The token used is the `sep_token`.
76
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
77
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
78
+ token instead.
79
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
80
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
81
+ sequence classification or for a text and a question for question answering. It is also used as the last
82
+ token of a sequence built with special tokens.
83
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
84
+ The token used for padding, for example when batching sequences of different lengths.
85
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
86
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
87
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
88
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
89
+ The token used for masking values. This is the token used when training this model with masked language
90
+ modeling. This is the token which the model will try to predict.
91
+ sp_model_kwargs (`dict`, *optional*):
92
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
93
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
94
+ to set:
95
+
96
+ - `enable_sampling`: Enable subword regularization.
97
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
98
+
99
+ - `nbest_size = {0,1}`: No sampling is performed.
100
+ - `nbest_size > 1`: samples from the nbest_size results.
101
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
102
+ using forward-filtering-and-backward-sampling algorithm.
103
+
104
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
105
+ BPE-dropout.
106
+ """
107
+
108
+ vocab_files_names = VOCAB_FILES_NAMES
109
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
110
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
111
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
112
+
113
+ def __init__(
114
+ self,
115
+ vocab_file,
116
+ do_lower_case=False,
117
+ split_by_punct=False,
118
+ bos_token="[CLS]",
119
+ eos_token="[SEP]",
120
+ unk_token="[UNK]",
121
+ sep_token="[SEP]",
122
+ pad_token="[PAD]",
123
+ cls_token="[CLS]",
124
+ mask_token="[MASK]",
125
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
126
+ **kwargs,
127
+ ) -> None:
128
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
129
+
130
+ if not os.path.isfile(vocab_file):
131
+ raise ValueError(
132
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
133
+ " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
134
+ )
135
+ self.do_lower_case = do_lower_case
136
+ self.split_by_punct = split_by_punct
137
+ self.vocab_file = vocab_file
138
+ self._tokenizer = SPMTokenizer(
139
+ vocab_file, None, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs
140
+ )
141
+ unk_token = AddedToken(unk_token, normalized=True, special=True) if isinstance(unk_token, str) else unk_token
142
+ super().__init__(
143
+ do_lower_case=do_lower_case,
144
+ bos_token=bos_token,
145
+ eos_token=eos_token,
146
+ unk_token=unk_token,
147
+ sep_token=sep_token,
148
+ pad_token=pad_token,
149
+ cls_token=cls_token,
150
+ mask_token=mask_token,
151
+ split_by_punct=split_by_punct,
152
+ sp_model_kwargs=self.sp_model_kwargs,
153
+ **kwargs,
154
+ )
155
+ self._tokenizer.special_tokens = self.all_special_tokens
156
+
157
+ @property
158
+ def vocab_size(self):
159
+ return len(self.vocab)
160
+
161
+ @property
162
+ def vocab(self):
163
+ return self._tokenizer.vocab
164
+
165
+ def get_vocab(self):
166
+ vocab = self.vocab.copy()
167
+ vocab.update(self.get_added_vocab())
168
+ return vocab
169
+
170
+ def _tokenize(self, text: str) -> List[str]:
171
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
172
+ if self.do_lower_case:
173
+ text = text.lower()
174
+ return self._tokenizer.tokenize(text)
175
+
176
+ def _convert_token_to_id(self, token):
177
+ """Converts a token (str) in an id using the vocab."""
178
+ return self._tokenizer.spm.PieceToId(token)
179
+
180
+ def _convert_id_to_token(self, index):
181
+ """Converts an index (integer) in a token (str) using the vocab."""
182
+ return self._tokenizer.spm.IdToPiece(index) if index < self.vocab_size else self.unk_token
183
+
184
+ def convert_tokens_to_string(self, tokens):
185
+ """Converts a sequence of tokens (string) in a single string."""
186
+ return self._tokenizer.decode(tokens)
187
+
188
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
189
+ """
190
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
191
+ adding special tokens. A DeBERTa sequence has the following format:
192
+
193
+ - single sequence: [CLS] X [SEP]
194
+ - pair of sequences: [CLS] A [SEP] B [SEP]
195
+
196
+ Args:
197
+ token_ids_0 (`List[int]`):
198
+ List of IDs to which the special tokens will be added.
199
+ token_ids_1 (`List[int]`, *optional*):
200
+ Optional second list of IDs for sequence pairs.
201
+
202
+ Returns:
203
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
204
+ """
205
+
206
+ if token_ids_1 is None:
207
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
208
+ cls = [self.cls_token_id]
209
+ sep = [self.sep_token_id]
210
+ return cls + token_ids_0 + sep + token_ids_1 + sep
211
+
212
+ def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
213
+ """
214
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
215
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
216
+
217
+ Args:
218
+ token_ids_0 (`List[int]`):
219
+ List of IDs.
220
+ token_ids_1 (`List[int]`, *optional*):
221
+ Optional second list of IDs for sequence pairs.
222
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
223
+ Whether or not the token list is already formatted with special tokens for the model.
224
+
225
+ Returns:
226
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
227
+ """
228
+
229
+ if already_has_special_tokens:
230
+ return super().get_special_tokens_mask(
231
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
232
+ )
233
+
234
+ if token_ids_1 is not None:
235
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
236
+ return [1] + ([0] * len(token_ids_0)) + [1]
237
+
238
+ def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
239
+ """
240
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
241
+ sequence pair mask has the following format:
242
+
243
+ ```
244
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
245
+ | first sequence | second sequence |
246
+ ```
247
+
248
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
249
+
250
+ Args:
251
+ token_ids_0 (`List[int]`):
252
+ List of IDs.
253
+ token_ids_1 (`List[int]`, *optional*):
254
+ Optional second list of IDs for sequence pairs.
255
+
256
+ Returns:
257
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
258
+ """
259
+ sep = [self.sep_token_id]
260
+ cls = [self.cls_token_id]
261
+ if token_ids_1 is None:
262
+ return len(cls + token_ids_0 + sep) * [0]
263
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
264
+
265
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
266
+ add_prefix_space = kwargs.pop("add_prefix_space", False)
267
+ if is_split_into_words or add_prefix_space:
268
+ text = " " + text
269
+ return (text, kwargs)
270
+
271
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
272
+ return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
273
+
274
+
275
+ class SPMTokenizer:
276
+ r"""
277
+ Constructs a tokenizer based on [SentencePiece](https://github.com/google/sentencepiece).
278
+
279
+ Args:
280
+ vocab_file (`str`):
281
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
282
+ contains the vocabulary necessary to instantiate a tokenizer.
283
+ sp_model_kwargs (`dict`, *optional*):
284
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
285
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
286
+ to set:
287
+
288
+ - `enable_sampling`: Enable subword regularization.
289
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
290
+
291
+ - `nbest_size = {0,1}`: No sampling is performed.
292
+ - `nbest_size > 1`: samples from the nbest_size results.
293
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
294
+ using forward-filtering-and-backward-sampling algorithm.
295
+
296
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
297
+ BPE-dropout.
298
+ """
299
+
300
+ def __init__(
301
+ self, vocab_file, special_tokens, split_by_punct=False, sp_model_kwargs: Optional[Dict[str, Any]] = None
302
+ ):
303
+ self.split_by_punct = split_by_punct
304
+ self.vocab_file = vocab_file
305
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
306
+ spm = sp.SentencePieceProcessor(**self.sp_model_kwargs)
307
+ if not os.path.exists(vocab_file):
308
+ raise FileNotFoundError(f"{vocab_file} does not exist!")
309
+ spm.load(vocab_file)
310
+ bpe_vocab_size = spm.GetPieceSize()
311
+ # Token map
312
+ # <unk> 0+1
313
+ # <s> 1+1
314
+ # </s> 2+1
315
+ self.vocab = {spm.IdToPiece(i): i for i in range(bpe_vocab_size)}
316
+ self.ids_to_tokens = [spm.IdToPiece(i) for i in range(bpe_vocab_size)]
317
+ # self.vocab['[PAD]'] = 0
318
+ # self.vocab['[CLS]'] = 1
319
+ # self.vocab['[SEP]'] = 2
320
+ # self.vocab['[UNK]'] = 3
321
+
322
+ self.spm = spm
323
+ self.special_tokens = special_tokens
324
+
325
+ def __getstate__(self):
326
+ state = self.__dict__.copy()
327
+ state["spm"] = None
328
+ return state
329
+
330
+ def __setstate__(self, d):
331
+ self.__dict__ = d
332
+
333
+ # for backward compatibility
334
+ if not hasattr(self, "sp_model_kwargs"):
335
+ self.sp_model_kwargs = {}
336
+
337
+ self.spm = sp.SentencePieceProcessor(**self.sp_model_kwargs)
338
+ self.spm.Load(self.vocab_file)
339
+
340
+ def tokenize(self, text):
341
+ return self._encode_as_pieces(text)
342
+
343
+ def convert_ids_to_tokens(self, ids):
344
+ tokens = []
345
+ for i in ids:
346
+ tokens.append(self.ids_to_tokens[i])
347
+ return tokens
348
+
349
+ def decode(self, tokens, start=-1, end=-1, raw_text=None):
350
+ if raw_text is None:
351
+ current_sub_tokens = []
352
+ out_string = ""
353
+ prev_is_special = False
354
+ for token in tokens:
355
+ # make sure that special tokens are not decoded using sentencepiece model
356
+ if token in self.special_tokens:
357
+ if not prev_is_special:
358
+ out_string += " "
359
+ out_string += self.spm.decode_pieces(current_sub_tokens) + token
360
+ prev_is_special = True
361
+ current_sub_tokens = []
362
+ else:
363
+ current_sub_tokens.append(token)
364
+ prev_is_special = False
365
+ out_string += self.spm.decode_pieces(current_sub_tokens)
366
+ return out_string.strip()
367
+ else:
368
+ words = self.split_to_words(raw_text)
369
+ word_tokens = [self.tokenize(w) for w in words]
370
+ token2words = [0] * len(tokens)
371
+ tid = 0
372
+ for i, w in enumerate(word_tokens):
373
+ for k, t in enumerate(w):
374
+ token2words[tid] = i
375
+ tid += 1
376
+ word_start = token2words[start]
377
+ word_end = token2words[end] if end < len(tokens) else len(words)
378
+ text = "".join(words[word_start:word_end])
379
+ return text
380
+
381
+ # TODO add a deprecation cycle as this can have different behaviour from our API
382
+ def add_special_token(self, token):
383
+ if token not in self.special_tokens:
384
+ self.special_tokens.append(token)
385
+ if token not in self.vocab:
386
+ self.vocab[token] = len(self.vocab) - 1
387
+ self.ids_to_tokens.append(token)
388
+ return self.id(token)
389
+
390
+ def part_of_whole_word(self, token, is_bos=False):
391
+ logger.warning_once(
392
+ "The `DebertaTokenizer.part_of_whole_word` method is deprecated and will be removed in `transformers==4.35`"
393
+ )
394
+ if is_bos:
395
+ return True
396
+ if (
397
+ len(token) == 1
398
+ and (_is_whitespace(list(token)[0]) or _is_control(list(token)[0]) or _is_punctuation(list(token)[0]))
399
+ ) or token in self.special_tokens:
400
+ return False
401
+
402
+ word_start = b"\xe2\x96\x81".decode("utf-8")
403
+ return not token.startswith(word_start)
404
+
405
+ def pad(self):
406
+ return "[PAD]"
407
+
408
+ def bos(self):
409
+ return "[CLS]"
410
+
411
+ def eos(self):
412
+ return "[SEP]"
413
+
414
+ def unk(self):
415
+ return "[UNK]"
416
+
417
+ def mask(self):
418
+ return "[MASK]"
419
+
420
+ def sym(self, id):
421
+ return self.ids_to_tokens[id]
422
+
423
+ def id(self, sym):
424
+ logger.warning_once(
425
+ "The `DebertaTokenizer.id` method is deprecated and will be removed in `transformers==4.35`"
426
+ )
427
+ return self.vocab[sym] if sym in self.vocab else 1
428
+
429
+ def _encode_as_pieces(self, text):
430
+ text = convert_to_unicode(text)
431
+ if self.split_by_punct:
432
+ words = self._run_split_on_punc(text)
433
+ pieces = [self.spm.encode(w, out_type=str) for w in words]
434
+ return [p for w in pieces for p in w]
435
+ else:
436
+ return self.spm.encode(text, out_type=str)
437
+
438
+ def split_to_words(self, text):
439
+ pieces = self._encode_as_pieces(text)
440
+ word_start = b"\xe2\x96\x81".decode("utf-8")
441
+ words = []
442
+ offset = 0
443
+ prev_end = 0
444
+ for i, p in enumerate(pieces):
445
+ if p.startswith(word_start):
446
+ if offset > prev_end:
447
+ words.append(text[prev_end:offset])
448
+ prev_end = offset
449
+ w = p.replace(word_start, "")
450
+ else:
451
+ w = p
452
+ try:
453
+ s = text.index(w, offset)
454
+ pn = ""
455
+ k = i + 1
456
+ while k < len(pieces):
457
+ pn = pieces[k].replace(word_start, "")
458
+ if len(pn) > 0:
459
+ break
460
+ k += 1
461
+
462
+ if len(pn) > 0 and pn in text[offset:s]:
463
+ offset = offset + 1
464
+ else:
465
+ offset = s + len(w)
466
+ except Exception:
467
+ offset = offset + 1
468
+
469
+ if prev_end < offset:
470
+ words.append(text[prev_end:offset])
471
+
472
+ return words
473
+
474
+ def _run_split_on_punc(self, text):
475
+ """Splits punctuation on a piece of text."""
476
+ chars = list(text)
477
+ i = 0
478
+ start_new_word = True
479
+ output = []
480
+ while i < len(chars):
481
+ char = chars[i]
482
+ if _is_punctuation(char):
483
+ output.append([char])
484
+ start_new_word = True
485
+ else:
486
+ if start_new_word:
487
+ output.append([])
488
+ start_new_word = False
489
+ output[-1].append(char)
490
+ i += 1
491
+
492
+ return ["".join(x) for x in output]
493
+
494
+ def save_pretrained(self, path: str, filename_prefix: str = None):
495
+ filename = VOCAB_FILES_NAMES[list(VOCAB_FILES_NAMES.keys())[0]]
496
+ if filename_prefix is not None:
497
+ filename = filename_prefix + "-" + filename
498
+ full_path = os.path.join(path, filename)
499
+ with open(full_path, "wb") as fs:
500
+ fs.write(self.spm.serialized_model_proto())
501
+ return (full_path,)
502
+
503
+
504
+ def _is_whitespace(char):
505
+ """Checks whether `chars` is a whitespace character."""
506
+ # \t, \n, and \r are technically control characters but we treat them
507
+ # as whitespace since they are generally considered as such.
508
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
509
+ return True
510
+ cat = unicodedata.category(char)
511
+ if cat == "Zs":
512
+ return True
513
+ return False
514
+
515
+
516
+ def _is_control(char):
517
+ """Checks whether `chars` is a control character."""
518
+ # These are technically control characters but we count them as whitespace
519
+ # characters.
520
+ if char == "\t" or char == "\n" or char == "\r":
521
+ return False
522
+ cat = unicodedata.category(char)
523
+ if cat.startswith("C"):
524
+ return True
525
+ return False
526
+
527
+
528
+ def _is_punctuation(char):
529
+ """Checks whether `chars` is a punctuation character."""
530
+ cp = ord(char)
531
+ # We treat all non-letter/number ASCII as punctuation.
532
+ # Characters such as "^", "$", and "`" are not in the Unicode
533
+ # Punctuation class but we treat them as punctuation anyways, for
534
+ # consistency.
535
+ if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
536
+ return True
537
+ cat = unicodedata.category(char)
538
+ if cat.startswith("P"):
539
+ return True
540
+ return False
541
+
542
+
543
+ def convert_to_unicode(text):
544
+ """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
545
+ if isinstance(text, str):
546
+ return text
547
+ elif isinstance(text, bytes):
548
+ return text.decode("utf-8", "ignore")
549
+ else:
550
+ raise ValueError(f"Unsupported string type: {type(text)}")
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 Microsoft and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast Tokenization class for model DeBERTa."""
16
+
17
+ import os
18
+ from shutil import copyfile
19
+ from typing import Optional, Tuple
20
+
21
+ from ...file_utils import is_sentencepiece_available
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+
25
+
26
+ if is_sentencepiece_available():
27
+ from .tokenization_deberta_v2 import DebertaV2Tokenizer
28
+ else:
29
+ DebertaV2Tokenizer = None
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"}
34
+
35
+ PRETRAINED_VOCAB_FILES_MAP = {
36
+ "vocab_file": {
37
+ "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model",
38
+ "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model",
39
+ "microsoft/deberta-v2-xlarge-mnli": (
40
+ "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model"
41
+ ),
42
+ "microsoft/deberta-v2-xxlarge-mnli": (
43
+ "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model"
44
+ ),
45
+ }
46
+ }
47
+
48
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
49
+ "microsoft/deberta-v2-xlarge": 512,
50
+ "microsoft/deberta-v2-xxlarge": 512,
51
+ "microsoft/deberta-v2-xlarge-mnli": 512,
52
+ "microsoft/deberta-v2-xxlarge-mnli": 512,
53
+ }
54
+
55
+ PRETRAINED_INIT_CONFIGURATION = {
56
+ "microsoft/deberta-v2-xlarge": {"do_lower_case": False},
57
+ "microsoft/deberta-v2-xxlarge": {"do_lower_case": False},
58
+ "microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False},
59
+ "microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False},
60
+ }
61
+
62
+
63
+ class DebertaV2TokenizerFast(PreTrainedTokenizerFast):
64
+ r"""
65
+ Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
66
+
67
+ Args:
68
+ vocab_file (`str`):
69
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
70
+ contains the vocabulary necessary to instantiate a tokenizer.
71
+ do_lower_case (`bool`, *optional*, defaults to `False`):
72
+ Whether or not to lowercase the input when tokenizing.
73
+ bos_token (`string`, *optional*, defaults to `"[CLS]"`):
74
+ The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token.
75
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
76
+ sequence. The token used is the `cls_token`.
77
+ eos_token (`string`, *optional*, defaults to `"[SEP]"`):
78
+ The end of sequence token. When building a sequence using special tokens, this is not the token that is
79
+ used for the end of sequence. The token used is the `sep_token`.
80
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
81
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
82
+ token instead.
83
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
84
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
85
+ sequence classification or for a text and a question for question answering. It is also used as the last
86
+ token of a sequence built with special tokens.
87
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
88
+ The token used for padding, for example when batching sequences of different lengths.
89
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
90
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
91
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
92
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
93
+ The token used for masking values. This is the token used when training this model with masked language
94
+ modeling. This is the token which the model will try to predict.
95
+ sp_model_kwargs (`dict`, *optional*):
96
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
97
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
98
+ to set:
99
+
100
+ - `enable_sampling`: Enable subword regularization.
101
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
102
+
103
+ - `nbest_size = {0,1}`: No sampling is performed.
104
+ - `nbest_size > 1`: samples from the nbest_size results.
105
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
106
+ using forward-filtering-and-backward-sampling algorithm.
107
+
108
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
109
+ BPE-dropout.
110
+ """
111
+
112
+ vocab_files_names = VOCAB_FILES_NAMES
113
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
114
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
115
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
116
+ slow_tokenizer_class = DebertaV2Tokenizer
117
+
118
+ def __init__(
119
+ self,
120
+ vocab_file=None,
121
+ tokenizer_file=None,
122
+ do_lower_case=False,
123
+ split_by_punct=False,
124
+ bos_token="[CLS]",
125
+ eos_token="[SEP]",
126
+ unk_token="[UNK]",
127
+ sep_token="[SEP]",
128
+ pad_token="[PAD]",
129
+ cls_token="[CLS]",
130
+ mask_token="[MASK]",
131
+ **kwargs,
132
+ ) -> None:
133
+ super().__init__(
134
+ vocab_file,
135
+ tokenizer_file=tokenizer_file,
136
+ do_lower_case=do_lower_case,
137
+ bos_token=bos_token,
138
+ eos_token=eos_token,
139
+ unk_token=unk_token,
140
+ sep_token=sep_token,
141
+ pad_token=pad_token,
142
+ cls_token=cls_token,
143
+ mask_token=mask_token,
144
+ split_by_punct=split_by_punct,
145
+ **kwargs,
146
+ )
147
+
148
+ self.do_lower_case = do_lower_case
149
+ self.split_by_punct = split_by_punct
150
+ self.vocab_file = vocab_file
151
+
152
+ @property
153
+ def can_save_slow_tokenizer(self) -> bool:
154
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
155
+
156
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
157
+ """
158
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
159
+ adding special tokens. A DeBERTa sequence has the following format:
160
+
161
+ - single sequence: [CLS] X [SEP]
162
+ - pair of sequences: [CLS] A [SEP] B [SEP]
163
+
164
+ Args:
165
+ token_ids_0 (`List[int]`):
166
+ List of IDs to which the special tokens will be added.
167
+ token_ids_1 (`List[int]`, *optional*):
168
+ Optional second list of IDs for sequence pairs.
169
+
170
+ Returns:
171
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
172
+ """
173
+
174
+ if token_ids_1 is None:
175
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
176
+ cls = [self.cls_token_id]
177
+ sep = [self.sep_token_id]
178
+ return cls + token_ids_0 + sep + token_ids_1 + sep
179
+
180
+ def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
181
+ """
182
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
183
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
184
+
185
+ Args:
186
+ token_ids_0 (`List[int]`):
187
+ List of IDs.
188
+ token_ids_1 (`List[int]`, *optional*):
189
+ Optional second list of IDs for sequence pairs.
190
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
191
+ Whether or not the token list is already formatted with special tokens for the model.
192
+
193
+ Returns:
194
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
195
+ """
196
+
197
+ if already_has_special_tokens:
198
+ return super().get_special_tokens_mask(
199
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
200
+ )
201
+
202
+ if token_ids_1 is not None:
203
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
204
+ return [1] + ([0] * len(token_ids_0)) + [1]
205
+
206
+ def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
207
+ """
208
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
209
+ sequence pair mask has the following format:
210
+
211
+ ```
212
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
213
+ | first sequence | second sequence |
214
+ ```
215
+
216
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
217
+
218
+ Args:
219
+ token_ids_0 (`List[int]`):
220
+ List of IDs.
221
+ token_ids_1 (`List[int]`, *optional*):
222
+ Optional second list of IDs for sequence pairs.
223
+
224
+ Returns:
225
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
226
+ """
227
+ sep = [self.sep_token_id]
228
+ cls = [self.cls_token_id]
229
+ if token_ids_1 is None:
230
+ return len(cls + token_ids_0 + sep) * [0]
231
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
232
+
233
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
234
+ if not self.can_save_slow_tokenizer:
235
+ raise ValueError(
236
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
237
+ "tokenizer."
238
+ )
239
+
240
+ if not os.path.isdir(save_directory):
241
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
242
+ return
243
+ out_vocab_file = os.path.join(
244
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
245
+ )
246
+
247
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
248
+ copyfile(self.vocab_file, out_vocab_file)
249
+
250
+ return (out_vocab_file,)
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__init__.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_squeezebert": [
22
+ "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
23
+ "SqueezeBertConfig",
24
+ "SqueezeBertOnnxConfig",
25
+ ],
26
+ "tokenization_squeezebert": ["SqueezeBertTokenizer"],
27
+ }
28
+
29
+ try:
30
+ if not is_tokenizers_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
36
+
37
+ try:
38
+ if not is_torch_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_squeezebert"] = [
44
+ "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
45
+ "SqueezeBertForMaskedLM",
46
+ "SqueezeBertForMultipleChoice",
47
+ "SqueezeBertForQuestionAnswering",
48
+ "SqueezeBertForSequenceClassification",
49
+ "SqueezeBertForTokenClassification",
50
+ "SqueezeBertModel",
51
+ "SqueezeBertModule",
52
+ "SqueezeBertPreTrainedModel",
53
+ ]
54
+
55
+
56
+ if TYPE_CHECKING:
57
+ from .configuration_squeezebert import (
58
+ SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
59
+ SqueezeBertConfig,
60
+ SqueezeBertOnnxConfig,
61
+ )
62
+ from .tokenization_squeezebert import SqueezeBertTokenizer
63
+
64
+ try:
65
+ if not is_tokenizers_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
71
+
72
+ try:
73
+ if not is_torch_available():
74
+ raise OptionalDependencyNotAvailable()
75
+ except OptionalDependencyNotAvailable:
76
+ pass
77
+ else:
78
+ from .modeling_squeezebert import (
79
+ SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
80
+ SqueezeBertForMaskedLM,
81
+ SqueezeBertForMultipleChoice,
82
+ SqueezeBertForQuestionAnswering,
83
+ SqueezeBertForSequenceClassification,
84
+ SqueezeBertForTokenClassification,
85
+ SqueezeBertModel,
86
+ SqueezeBertModule,
87
+ SqueezeBertPreTrainedModel,
88
+ )
89
+
90
+ else:
91
+ import sys
92
+
93
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.49 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/configuration_squeezebert.cpython-310.pyc ADDED
Binary file (7 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/modeling_squeezebert.cpython-310.pyc ADDED
Binary file (32.9 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert.cpython-310.pyc ADDED
Binary file (17.8 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert_fast.cpython-310.pyc ADDED
Binary file (7.77 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/configuration_squeezebert.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ SqueezeBERT model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27
+ "squeezebert/squeezebert-uncased": (
28
+ "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/config.json"
29
+ ),
30
+ "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/config.json",
31
+ "squeezebert/squeezebert-mnli-headless": (
32
+ "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/config.json"
33
+ ),
34
+ }
35
+
36
+
37
+ class SqueezeBertConfig(PretrainedConfig):
38
+ r"""
39
+ This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a
40
+ SqueezeBERT model according to the specified arguments, defining the model architecture. Instantiating a
41
+ configuration with the defaults will yield a similar configuration to that of the SqueezeBERT
42
+ [squeezebert/squeezebert-uncased](https://huggingface.co/squeezebert/squeezebert-uncased) architecture.
43
+
44
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
45
+ documentation from [`PretrainedConfig`] for more information.
46
+
47
+
48
+ Args:
49
+ vocab_size (`int`, *optional*, defaults to 30522):
50
+ Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by
51
+ the `inputs_ids` passed when calling [`SqueezeBertModel`].
52
+ hidden_size (`int`, *optional*, defaults to 768):
53
+ Dimensionality of the encoder layers and the pooler layer.
54
+ num_hidden_layers (`int`, *optional*, defaults to 12):
55
+ Number of hidden layers in the Transformer encoder.
56
+ num_attention_heads (`int`, *optional*, defaults to 12):
57
+ Number of attention heads for each attention layer in the Transformer encoder.
58
+ intermediate_size (`int`, *optional*, defaults to 3072):
59
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
60
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
61
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
62
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
63
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
64
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
65
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
66
+ The dropout ratio for the attention probabilities.
67
+ max_position_embeddings (`int`, *optional*, defaults to 512):
68
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
69
+ just in case (e.g., 512 or 1024 or 2048).
70
+ type_vocab_size (`int`, *optional*, defaults to 2):
71
+ The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
72
+ initializer_range (`float`, *optional*, defaults to 0.02):
73
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
74
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
75
+
76
+ pad_token_id (`int`, *optional*, defaults to 0):
77
+ The ID of the token in the word embedding to use as padding.
78
+ embedding_size (`int`, *optional*, defaults to 768):
79
+ The dimension of the word embedding vectors.
80
+
81
+ q_groups (`int`, *optional*, defaults to 4):
82
+ The number of groups in Q layer.
83
+ k_groups (`int`, *optional*, defaults to 4):
84
+ The number of groups in K layer.
85
+ v_groups (`int`, *optional*, defaults to 4):
86
+ The number of groups in V layer.
87
+ post_attention_groups (`int`, *optional*, defaults to 1):
88
+ The number of groups in the first feed forward network layer.
89
+ intermediate_groups (`int`, *optional*, defaults to 4):
90
+ The number of groups in the second feed forward network layer.
91
+ output_groups (`int`, *optional*, defaults to 4):
92
+ The number of groups in the third feed forward network layer.
93
+
94
+ Examples:
95
+
96
+ ```python
97
+ >>> from transformers import SqueezeBertConfig, SqueezeBertModel
98
+
99
+ >>> # Initializing a SqueezeBERT configuration
100
+ >>> configuration = SqueezeBertConfig()
101
+
102
+ >>> # Initializing a model (with random weights) from the configuration above
103
+ >>> model = SqueezeBertModel(configuration)
104
+
105
+ >>> # Accessing the model configuration
106
+ >>> configuration = model.config
107
+ ```
108
+
109
+ Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained
110
+ checkpoints.
111
+ """
112
+
113
+ pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
114
+ model_type = "squeezebert"
115
+
116
+ def __init__(
117
+ self,
118
+ vocab_size=30522,
119
+ hidden_size=768,
120
+ num_hidden_layers=12,
121
+ num_attention_heads=12,
122
+ intermediate_size=3072,
123
+ hidden_act="gelu",
124
+ hidden_dropout_prob=0.1,
125
+ attention_probs_dropout_prob=0.1,
126
+ max_position_embeddings=512,
127
+ type_vocab_size=2,
128
+ initializer_range=0.02,
129
+ layer_norm_eps=1e-12,
130
+ pad_token_id=0,
131
+ embedding_size=768,
132
+ q_groups=4,
133
+ k_groups=4,
134
+ v_groups=4,
135
+ post_attention_groups=1,
136
+ intermediate_groups=4,
137
+ output_groups=4,
138
+ **kwargs,
139
+ ):
140
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
141
+
142
+ self.vocab_size = vocab_size
143
+ self.hidden_size = hidden_size
144
+ self.num_hidden_layers = num_hidden_layers
145
+ self.num_attention_heads = num_attention_heads
146
+ self.hidden_act = hidden_act
147
+ self.intermediate_size = intermediate_size
148
+ self.hidden_dropout_prob = hidden_dropout_prob
149
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
150
+ self.max_position_embeddings = max_position_embeddings
151
+ self.type_vocab_size = type_vocab_size
152
+ self.initializer_range = initializer_range
153
+ self.layer_norm_eps = layer_norm_eps
154
+ self.embedding_size = embedding_size
155
+ self.q_groups = q_groups
156
+ self.k_groups = k_groups
157
+ self.v_groups = v_groups
158
+ self.post_attention_groups = post_attention_groups
159
+ self.intermediate_groups = intermediate_groups
160
+ self.output_groups = output_groups
161
+
162
+
163
+ # # Copied from transformers.models.bert.configuration_bert.BertOnxxConfig with Bert->SqueezeBert
164
+ class SqueezeBertOnnxConfig(OnnxConfig):
165
+ @property
166
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
167
+ if self.task == "multiple-choice":
168
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
169
+ else:
170
+ dynamic_axis = {0: "batch", 1: "sequence"}
171
+ return OrderedDict(
172
+ [
173
+ ("input_ids", dynamic_axis),
174
+ ("attention_mask", dynamic_axis),
175
+ ("token_type_ids", dynamic_axis),
176
+ ]
177
+ )
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/modeling_squeezebert.py ADDED
@@ -0,0 +1,1090 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch SqueezeBert model."""
16
+
17
+
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ BaseModelOutputWithPooling,
29
+ MaskedLMOutput,
30
+ MultipleChoiceModelOutput,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
37
+ from .configuration_squeezebert import SqueezeBertConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "squeezebert/squeezebert-uncased"
43
+ _CONFIG_FOR_DOC = "SqueezeBertConfig"
44
+
45
+ SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
46
+ "squeezebert/squeezebert-uncased",
47
+ "squeezebert/squeezebert-mnli",
48
+ "squeezebert/squeezebert-mnli-headless",
49
+ ]
50
+
51
+
52
+ class SqueezeBertEmbeddings(nn.Module):
53
+ """Construct the embeddings from word, position and token_type embeddings."""
54
+
55
+ def __init__(self, config):
56
+ super().__init__()
57
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
58
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
59
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
60
+
61
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
62
+ # any TensorFlow checkpoint file
63
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
64
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
65
+
66
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
67
+ self.register_buffer(
68
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
69
+ )
70
+
71
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
72
+ if input_ids is not None:
73
+ input_shape = input_ids.size()
74
+ else:
75
+ input_shape = inputs_embeds.size()[:-1]
76
+
77
+ seq_length = input_shape[1]
78
+
79
+ if position_ids is None:
80
+ position_ids = self.position_ids[:, :seq_length]
81
+
82
+ if token_type_ids is None:
83
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
84
+
85
+ if inputs_embeds is None:
86
+ inputs_embeds = self.word_embeddings(input_ids)
87
+ position_embeddings = self.position_embeddings(position_ids)
88
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
89
+
90
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
91
+ embeddings = self.LayerNorm(embeddings)
92
+ embeddings = self.dropout(embeddings)
93
+ return embeddings
94
+
95
+
96
+ class MatMulWrapper(nn.Module):
97
+ """
98
+ Wrapper for torch.matmul(). This makes flop-counting easier to implement. Note that if you directly call
99
+ torch.matmul() in your code, the flop counter will typically ignore the flops of the matmul.
100
+ """
101
+
102
+ def __init__(self):
103
+ super().__init__()
104
+
105
+ def forward(self, mat1, mat2):
106
+ """
107
+
108
+ :param inputs: two torch tensors :return: matmul of these tensors
109
+
110
+ Here are the typical dimensions found in BERT (the B is optional) mat1.shape: [B, <optional extra dims>, M, K]
111
+ mat2.shape: [B, <optional extra dims>, K, N] output shape: [B, <optional extra dims>, M, N]
112
+ """
113
+ return torch.matmul(mat1, mat2)
114
+
115
+
116
+ class SqueezeBertLayerNorm(nn.LayerNorm):
117
+ """
118
+ This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension.
119
+
120
+ N = batch C = channels W = sequence length
121
+ """
122
+
123
+ def __init__(self, hidden_size, eps=1e-12):
124
+ nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps) # instantiates self.{weight, bias, eps}
125
+
126
+ def forward(self, x):
127
+ x = x.permute(0, 2, 1)
128
+ x = nn.LayerNorm.forward(self, x)
129
+ return x.permute(0, 2, 1)
130
+
131
+
132
+ class ConvDropoutLayerNorm(nn.Module):
133
+ """
134
+ ConvDropoutLayerNorm: Conv, Dropout, LayerNorm
135
+ """
136
+
137
+ def __init__(self, cin, cout, groups, dropout_prob):
138
+ super().__init__()
139
+
140
+ self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
141
+ self.layernorm = SqueezeBertLayerNorm(cout)
142
+ self.dropout = nn.Dropout(dropout_prob)
143
+
144
+ def forward(self, hidden_states, input_tensor):
145
+ x = self.conv1d(hidden_states)
146
+ x = self.dropout(x)
147
+ x = x + input_tensor
148
+ x = self.layernorm(x)
149
+ return x
150
+
151
+
152
+ class ConvActivation(nn.Module):
153
+ """
154
+ ConvActivation: Conv, Activation
155
+ """
156
+
157
+ def __init__(self, cin, cout, groups, act):
158
+ super().__init__()
159
+ self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
160
+ self.act = ACT2FN[act]
161
+
162
+ def forward(self, x):
163
+ output = self.conv1d(x)
164
+ return self.act(output)
165
+
166
+
167
+ class SqueezeBertSelfAttention(nn.Module):
168
+ def __init__(self, config, cin, q_groups=1, k_groups=1, v_groups=1):
169
+ """
170
+ config = used for some things; ignored for others (work in progress...) cin = input channels = output channels
171
+ groups = number of groups to use in conv1d layers
172
+ """
173
+ super().__init__()
174
+ if cin % config.num_attention_heads != 0:
175
+ raise ValueError(
176
+ f"cin ({cin}) is not a multiple of the number of attention heads ({config.num_attention_heads})"
177
+ )
178
+ self.num_attention_heads = config.num_attention_heads
179
+ self.attention_head_size = int(cin / config.num_attention_heads)
180
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
181
+
182
+ self.query = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=q_groups)
183
+ self.key = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=k_groups)
184
+ self.value = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=v_groups)
185
+
186
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
187
+ self.softmax = nn.Softmax(dim=-1)
188
+
189
+ self.matmul_qk = MatMulWrapper()
190
+ self.matmul_qkv = MatMulWrapper()
191
+
192
+ def transpose_for_scores(self, x):
193
+ """
194
+ - input: [N, C, W]
195
+ - output: [N, C1, W, C2] where C1 is the head index, and C2 is one head's contents
196
+ """
197
+ new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W]
198
+ x = x.view(*new_x_shape)
199
+ return x.permute(0, 1, 3, 2) # [N, C1, C2, W] --> [N, C1, W, C2]
200
+
201
+ def transpose_key_for_scores(self, x):
202
+ """
203
+ - input: [N, C, W]
204
+ - output: [N, C1, C2, W] where C1 is the head index, and C2 is one head's contents
205
+ """
206
+ new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W]
207
+ x = x.view(*new_x_shape)
208
+ # no `permute` needed
209
+ return x
210
+
211
+ def transpose_output(self, x):
212
+ """
213
+ - input: [N, C1, W, C2]
214
+ - output: [N, C, W]
215
+ """
216
+ x = x.permute(0, 1, 3, 2).contiguous() # [N, C1, C2, W]
217
+ new_x_shape = (x.size()[0], self.all_head_size, x.size()[3]) # [N, C, W]
218
+ x = x.view(*new_x_shape)
219
+ return x
220
+
221
+ def forward(self, hidden_states, attention_mask, output_attentions):
222
+ """
223
+ expects hidden_states in [N, C, W] data layout.
224
+
225
+ The attention_mask data layout is [N, W], and it does not need to be transposed.
226
+ """
227
+ mixed_query_layer = self.query(hidden_states)
228
+ mixed_key_layer = self.key(hidden_states)
229
+ mixed_value_layer = self.value(hidden_states)
230
+
231
+ query_layer = self.transpose_for_scores(mixed_query_layer)
232
+ key_layer = self.transpose_key_for_scores(mixed_key_layer)
233
+ value_layer = self.transpose_for_scores(mixed_value_layer)
234
+
235
+ # Take the dot product between "query" and "key" to get the raw attention scores.
236
+ attention_score = self.matmul_qk(query_layer, key_layer)
237
+ attention_score = attention_score / math.sqrt(self.attention_head_size)
238
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
239
+ attention_score = attention_score + attention_mask
240
+
241
+ # Normalize the attention scores to probabilities.
242
+ attention_probs = self.softmax(attention_score)
243
+
244
+ # This is actually dropping out entire tokens to attend to, which might
245
+ # seem a bit unusual, but is taken from the original Transformer paper.
246
+ attention_probs = self.dropout(attention_probs)
247
+
248
+ context_layer = self.matmul_qkv(attention_probs, value_layer)
249
+ context_layer = self.transpose_output(context_layer)
250
+
251
+ result = {"context_layer": context_layer}
252
+ if output_attentions:
253
+ result["attention_score"] = attention_score
254
+ return result
255
+
256
+
257
+ class SqueezeBertModule(nn.Module):
258
+ def __init__(self, config):
259
+ """
260
+ - hidden_size = input chans = output chans for Q, K, V (they are all the same ... for now) = output chans for
261
+ the module
262
+ - intermediate_size = output chans for intermediate layer
263
+ - groups = number of groups for all layers in the BertModule. (eventually we could change the interface to
264
+ allow different groups for different layers)
265
+ """
266
+ super().__init__()
267
+
268
+ c0 = config.hidden_size
269
+ c1 = config.hidden_size
270
+ c2 = config.intermediate_size
271
+ c3 = config.hidden_size
272
+
273
+ self.attention = SqueezeBertSelfAttention(
274
+ config=config, cin=c0, q_groups=config.q_groups, k_groups=config.k_groups, v_groups=config.v_groups
275
+ )
276
+ self.post_attention = ConvDropoutLayerNorm(
277
+ cin=c0, cout=c1, groups=config.post_attention_groups, dropout_prob=config.hidden_dropout_prob
278
+ )
279
+ self.intermediate = ConvActivation(cin=c1, cout=c2, groups=config.intermediate_groups, act=config.hidden_act)
280
+ self.output = ConvDropoutLayerNorm(
281
+ cin=c2, cout=c3, groups=config.output_groups, dropout_prob=config.hidden_dropout_prob
282
+ )
283
+
284
+ def forward(self, hidden_states, attention_mask, output_attentions):
285
+ att = self.attention(hidden_states, attention_mask, output_attentions)
286
+ attention_output = att["context_layer"]
287
+
288
+ post_attention_output = self.post_attention(attention_output, hidden_states)
289
+ intermediate_output = self.intermediate(post_attention_output)
290
+ layer_output = self.output(intermediate_output, post_attention_output)
291
+
292
+ output_dict = {"feature_map": layer_output}
293
+ if output_attentions:
294
+ output_dict["attention_score"] = att["attention_score"]
295
+
296
+ return output_dict
297
+
298
+
299
+ class SqueezeBertEncoder(nn.Module):
300
+ def __init__(self, config):
301
+ super().__init__()
302
+
303
+ assert config.embedding_size == config.hidden_size, (
304
+ "If you want embedding_size != intermediate hidden_size, "
305
+ "please insert a Conv1d layer to adjust the number of channels "
306
+ "before the first SqueezeBertModule."
307
+ )
308
+
309
+ self.layers = nn.ModuleList(SqueezeBertModule(config) for _ in range(config.num_hidden_layers))
310
+
311
+ def forward(
312
+ self,
313
+ hidden_states,
314
+ attention_mask=None,
315
+ head_mask=None,
316
+ output_attentions=False,
317
+ output_hidden_states=False,
318
+ return_dict=True,
319
+ ):
320
+ if head_mask is None:
321
+ head_mask_is_all_none = True
322
+ elif head_mask.count(None) == len(head_mask):
323
+ head_mask_is_all_none = True
324
+ else:
325
+ head_mask_is_all_none = False
326
+ assert head_mask_is_all_none is True, "head_mask is not yet supported in the SqueezeBert implementation."
327
+
328
+ # [batch_size, sequence_length, hidden_size] --> [batch_size, hidden_size, sequence_length]
329
+ hidden_states = hidden_states.permute(0, 2, 1)
330
+
331
+ all_hidden_states = () if output_hidden_states else None
332
+ all_attentions = () if output_attentions else None
333
+
334
+ for layer in self.layers:
335
+ if output_hidden_states:
336
+ hidden_states = hidden_states.permute(0, 2, 1)
337
+ all_hidden_states += (hidden_states,)
338
+ hidden_states = hidden_states.permute(0, 2, 1)
339
+
340
+ layer_output = layer.forward(hidden_states, attention_mask, output_attentions)
341
+
342
+ hidden_states = layer_output["feature_map"]
343
+
344
+ if output_attentions:
345
+ all_attentions += (layer_output["attention_score"],)
346
+
347
+ # [batch_size, hidden_size, sequence_length] --> [batch_size, sequence_length, hidden_size]
348
+ hidden_states = hidden_states.permute(0, 2, 1)
349
+
350
+ if output_hidden_states:
351
+ all_hidden_states += (hidden_states,)
352
+
353
+ if not return_dict:
354
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
355
+ return BaseModelOutput(
356
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
357
+ )
358
+
359
+
360
+ class SqueezeBertPooler(nn.Module):
361
+ def __init__(self, config):
362
+ super().__init__()
363
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
364
+ self.activation = nn.Tanh()
365
+
366
+ def forward(self, hidden_states):
367
+ # We "pool" the model by simply taking the hidden state corresponding
368
+ # to the first token.
369
+ first_token_tensor = hidden_states[:, 0]
370
+ pooled_output = self.dense(first_token_tensor)
371
+ pooled_output = self.activation(pooled_output)
372
+ return pooled_output
373
+
374
+
375
+ class SqueezeBertPredictionHeadTransform(nn.Module):
376
+ def __init__(self, config):
377
+ super().__init__()
378
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
379
+ if isinstance(config.hidden_act, str):
380
+ self.transform_act_fn = ACT2FN[config.hidden_act]
381
+ else:
382
+ self.transform_act_fn = config.hidden_act
383
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
384
+
385
+ def forward(self, hidden_states):
386
+ hidden_states = self.dense(hidden_states)
387
+ hidden_states = self.transform_act_fn(hidden_states)
388
+ hidden_states = self.LayerNorm(hidden_states)
389
+ return hidden_states
390
+
391
+
392
+ class SqueezeBertLMPredictionHead(nn.Module):
393
+ def __init__(self, config):
394
+ super().__init__()
395
+ self.transform = SqueezeBertPredictionHeadTransform(config)
396
+
397
+ # The output weights are the same as the input embeddings, but there is
398
+ # an output-only bias for each token.
399
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
400
+
401
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
402
+
403
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
404
+ self.decoder.bias = self.bias
405
+
406
+ def forward(self, hidden_states):
407
+ hidden_states = self.transform(hidden_states)
408
+ hidden_states = self.decoder(hidden_states)
409
+ return hidden_states
410
+
411
+
412
+ class SqueezeBertOnlyMLMHead(nn.Module):
413
+ def __init__(self, config):
414
+ super().__init__()
415
+ self.predictions = SqueezeBertLMPredictionHead(config)
416
+
417
+ def forward(self, sequence_output):
418
+ prediction_scores = self.predictions(sequence_output)
419
+ return prediction_scores
420
+
421
+
422
class SqueezeBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SqueezeBertConfig
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding-token embedding row at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, SqueezeBertLayerNorm):
            # LayerNorm starts as the identity: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
446
+
447
+
448
+ SQUEEZEBERT_START_DOCSTRING = r"""
449
+
450
+ The SqueezeBERT model was proposed in [SqueezeBERT: What can computer vision teach NLP about efficient neural
451
+ networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W.
452
+ Keutzer
453
+
454
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
455
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
456
+ etc.)
457
+
458
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
459
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
460
+ and behavior.
461
+
462
+ For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the
463
+ *squeezebert/squeezebert-mnli-headless* checkpoint as a starting point.
464
+
465
+ Parameters:
466
+ config ([`SqueezeBertConfig`]): Model configuration class with all the parameters of the model.
467
+ Initializing with a config file does not load the weights associated with the model, only the
468
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
469
+
470
+ Hierarchy:
471
+
472
+ ```
473
+ Internal class hierarchy:
474
+ SqueezeBertModel
475
+ SqueezeBertEncoder
476
+ SqueezeBertModule
477
+ SqueezeBertSelfAttention
478
+ ConvActivation
479
+ ConvDropoutLayerNorm
480
+ ```
481
+
482
+ Data layouts:
483
+
484
+ ```
485
+ Input data is in [batch, sequence_length, hidden_size] format.
486
+
487
+ Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if `output_hidden_states == True`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format.
488
+
489
+ The final output of the encoder is in [batch, sequence_length, hidden_size] format.
490
+ ```
491
+ """
492
+
493
+ SQUEEZEBERT_INPUTS_DOCSTRING = r"""
494
+ Args:
495
+ input_ids (`torch.LongTensor` of shape `({0})`):
496
+ Indices of input sequence tokens in the vocabulary.
497
+
498
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
499
+ [`PreTrainedTokenizer.__call__`] for details.
500
+
501
+ [What are input IDs?](../glossary#input-ids)
502
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
503
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
504
+
505
+ - 1 for tokens that are **not masked**,
506
+ - 0 for tokens that are **masked**.
507
+
508
+ [What are attention masks?](../glossary#attention-mask)
509
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
510
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
511
+ 1]`:
512
+
513
+ - 0 corresponds to a *sentence A* token,
514
+ - 1 corresponds to a *sentence B* token.
515
+
516
+ [What are token type IDs?](../glossary#token-type-ids)
517
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
518
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
519
+ config.max_position_embeddings - 1]`.
520
+
521
+ [What are position IDs?](../glossary#position-ids)
522
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
523
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
524
+
525
+ - 1 indicates the head is **not masked**,
526
+ - 0 indicates the head is **masked**.
527
+
528
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
529
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
530
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
531
+ model's internal embedding lookup matrix.
532
+ output_attentions (`bool`, *optional*):
533
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
534
+ tensors for more detail.
535
+ output_hidden_states (`bool`, *optional*):
536
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
537
+ more detail.
538
+ return_dict (`bool`, *optional*):
539
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
540
+ """
541
+
542
+
543
@add_start_docstrings(
    "The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top.",
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertModel(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = SqueezeBertEmbeddings(config)
        self.encoder = SqueezeBertEncoder(config)
        self.pooler = SqueezeBertPooler(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Word-embedding table (used for input lookups and weight tying).
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        # Fall back to config defaults for any unspecified output options.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Default mask: attend everywhere; default segments: all sentence A (zeros).
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        if not return_dict:
            # Tuple output: (sequence_output, pooled_output, *extras from encoder).
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
644
+
645
+
646
@add_start_docstrings("""SqueezeBERT Model with a `language modeling` head on top.""", SQUEEZEBERT_START_DOCSTRING)
class SqueezeBertForMaskedLM(SqueezeBertPreTrainedModel):
    # These decoder weights are tied to the input embeddings (see
    # `get_output_embeddings` / `set_output_embeddings` below).
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.transformer = SqueezeBertModel(config)
        self.cls = SqueezeBertOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            # Flatten (batch, seq) x vocab logits against flat label ids.
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
722
+
723
+
724
@add_start_docstrings(
    """
    SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForSequenceClassification(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.transformer = SqueezeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify from the pooled ([CLS]-style) representation.
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once (cached on config) from num_labels
            # and the label dtype, then pick the matching loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
821
+
822
+
823
@add_start_docstrings(
    """
    SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForMultipleChoice(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.transformer = SqueezeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One score per choice; scores are reshaped to (batch, num_choices) in forward.
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(
        SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see
            *input_ids* above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten the choice dimension so the encoder sees
        # (batch * num_choices, seq_len) instead of (batch, num_choices, seq_len).
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten back to one score column per choice.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
914
+
915
+
916
@add_start_docstrings(
    """
    SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForTokenClassification(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = SqueezeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token classification uses the full sequence output, not the pooled vector.
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
992
+
993
+
994
@add_start_docstrings(
    """
    SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    SQUEEZEBERT_START_DOCSTRING,
)
class SqueezeBertForQuestionAnswering(SqueezeBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = SqueezeBertModel(config)
        # Two logits per token: start-of-span and end-of-span scores.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Split the 2-channel output into per-token start/end logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (clamped out-of-range positions become `ignored_index`, which the loss skips)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/tokenization_squeezebert.py ADDED
@@ -0,0 +1,531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for SqueezeBERT."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+ PRETRAINED_VOCAB_FILES_MAP = {
31
+ "vocab_file": {
32
+ "squeezebert/squeezebert-uncased": (
33
+ "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
34
+ ),
35
+ "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
36
+ "squeezebert/squeezebert-mnli-headless": (
37
+ "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
38
+ ),
39
+ }
40
+ }
41
+
42
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
43
+ "squeezebert/squeezebert-uncased": 512,
44
+ "squeezebert/squeezebert-mnli": 512,
45
+ "squeezebert/squeezebert-mnli-headless": 512,
46
+ }
47
+
48
+
49
+ PRETRAINED_INIT_CONFIGURATION = {
50
+ "squeezebert/squeezebert-uncased": {"do_lower_case": True},
51
+ "squeezebert/squeezebert-mnli": {"do_lower_case": True},
52
+ "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
53
+ }
54
+
55
+
56
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
57
def load_vocab(vocab_file):
    """Read a vocabulary file and map each token (one per line) to its line index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            # Strip only the trailing newline; other whitespace is significant.
            vocab[line.rstrip("\n")] = index
    return vocab
66
+
67
+
68
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
69
def whitespace_tokenize(text):
    """Trim surrounding whitespace and split on runs of whitespace; '' -> []."""
    trimmed = text.strip()
    if not trimmed:
        return []
    return trimmed.split()
76
+
77
+
78
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->SqueezeBert,BERT->SqueezeBERT
79
+ class SqueezeBertTokenizer(PreTrainedTokenizer):
80
+ r"""
81
+ Construct a SqueezeBERT tokenizer. Based on WordPiece.
82
+
83
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
84
+ this superclass for more information regarding those methods.
85
+
86
+ Args:
87
+ vocab_file (`str`):
88
+ File containing the vocabulary.
89
+ do_lower_case (`bool`, *optional*, defaults to `True`):
90
+ Whether or not to lowercase the input when tokenizing.
91
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
92
+ Whether or not to do basic tokenization before WordPiece.
93
+ never_split (`Iterable`, *optional*):
94
+ Collection of tokens which will never be split during tokenization. Only has an effect when
95
+ `do_basic_tokenize=True`
96
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
97
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
98
+ token instead.
99
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
100
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
101
+ sequence classification or for a text and a question for question answering. It is also used as the last
102
+ token of a sequence built with special tokens.
103
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
104
+ The token used for padding, for example when batching sequences of different lengths.
105
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
106
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
107
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
108
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
109
+ The token used for masking values. This is the token used when training this model with masked language
110
+ modeling. This is the token which the model will try to predict.
111
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
112
+ Whether or not to tokenize Chinese characters.
113
+
114
+ This should likely be deactivated for Japanese (see this
115
+ [issue](https://github.com/huggingface/transformers/issues/328)).
116
+ strip_accents (`bool`, *optional*):
117
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
118
+ value for `lowercase` (as in the original SqueezeBERT).
119
+ """
120
+
121
+ vocab_files_names = VOCAB_FILES_NAMES
122
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
123
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
124
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
125
+
126
+ def __init__(
127
+ self,
128
+ vocab_file,
129
+ do_lower_case=True,
130
+ do_basic_tokenize=True,
131
+ never_split=None,
132
+ unk_token="[UNK]",
133
+ sep_token="[SEP]",
134
+ pad_token="[PAD]",
135
+ cls_token="[CLS]",
136
+ mask_token="[MASK]",
137
+ tokenize_chinese_chars=True,
138
+ strip_accents=None,
139
+ **kwargs,
140
+ ):
141
+ if not os.path.isfile(vocab_file):
142
+ raise ValueError(
143
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
144
+ " model use `tokenizer = SqueezeBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
145
+ )
146
+ self.vocab = load_vocab(vocab_file)
147
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
148
+ self.do_basic_tokenize = do_basic_tokenize
149
+ if do_basic_tokenize:
150
+ self.basic_tokenizer = BasicTokenizer(
151
+ do_lower_case=do_lower_case,
152
+ never_split=never_split,
153
+ tokenize_chinese_chars=tokenize_chinese_chars,
154
+ strip_accents=strip_accents,
155
+ )
156
+
157
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
158
+
159
+ super().__init__(
160
+ do_lower_case=do_lower_case,
161
+ do_basic_tokenize=do_basic_tokenize,
162
+ never_split=never_split,
163
+ unk_token=unk_token,
164
+ sep_token=sep_token,
165
+ pad_token=pad_token,
166
+ cls_token=cls_token,
167
+ mask_token=mask_token,
168
+ tokenize_chinese_chars=tokenize_chinese_chars,
169
+ strip_accents=strip_accents,
170
+ **kwargs,
171
+ )
172
+
173
+ @property
174
+ def do_lower_case(self):
175
+ return self.basic_tokenizer.do_lower_case
176
+
177
+ @property
178
+ def vocab_size(self):
179
+ return len(self.vocab)
180
+
181
+ def get_vocab(self):
182
+ return dict(self.vocab, **self.added_tokens_encoder)
183
+
184
+ def _tokenize(self, text, split_special_tokens=False):
185
+ split_tokens = []
186
+ if self.do_basic_tokenize:
187
+ for token in self.basic_tokenizer.tokenize(
188
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
189
+ ):
190
+ # If the token is part of the never_split set
191
+ if token in self.basic_tokenizer.never_split:
192
+ split_tokens.append(token)
193
+ else:
194
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
195
+ else:
196
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
197
+ return split_tokens
198
+
199
+ def _convert_token_to_id(self, token):
200
+ """Converts a token (str) in an id using the vocab."""
201
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
202
+
203
+ def _convert_id_to_token(self, index):
204
+ """Converts an index (integer) in a token (str) using the vocab."""
205
+ return self.ids_to_tokens.get(index, self.unk_token)
206
+
207
+ def convert_tokens_to_string(self, tokens):
208
+ """Converts a sequence of tokens (string) in a single string."""
209
+ out_string = " ".join(tokens).replace(" ##", "").strip()
210
+ return out_string
211
+
212
+ def build_inputs_with_special_tokens(
213
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
214
+ ) -> List[int]:
215
+ """
216
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
217
+ adding special tokens. A SqueezeBERT sequence has the following format:
218
+
219
+ - single sequence: `[CLS] X [SEP]`
220
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
221
+
222
+ Args:
223
+ token_ids_0 (`List[int]`):
224
+ List of IDs to which the special tokens will be added.
225
+ token_ids_1 (`List[int]`, *optional*):
226
+ Optional second list of IDs for sequence pairs.
227
+
228
+ Returns:
229
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
230
+ """
231
+ if token_ids_1 is None:
232
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
233
+ cls = [self.cls_token_id]
234
+ sep = [self.sep_token_id]
235
+ return cls + token_ids_0 + sep + token_ids_1 + sep
236
+
237
+ def get_special_tokens_mask(
238
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
239
+ ) -> List[int]:
240
+ """
241
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
242
+ special tokens using the tokenizer `prepare_for_model` method.
243
+
244
+ Args:
245
+ token_ids_0 (`List[int]`):
246
+ List of IDs.
247
+ token_ids_1 (`List[int]`, *optional*):
248
+ Optional second list of IDs for sequence pairs.
249
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
250
+ Whether or not the token list is already formatted with special tokens for the model.
251
+
252
+ Returns:
253
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
254
+ """
255
+
256
+ if already_has_special_tokens:
257
+ return super().get_special_tokens_mask(
258
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
259
+ )
260
+
261
+ if token_ids_1 is not None:
262
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
263
+ return [1] + ([0] * len(token_ids_0)) + [1]
264
+
265
+ def create_token_type_ids_from_sequences(
266
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
267
+ ) -> List[int]:
268
+ """
269
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence
270
+ pair mask has the following format:
271
+
272
+ ```
273
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
274
+ | first sequence | second sequence |
275
+ ```
276
+
277
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
278
+
279
+ Args:
280
+ token_ids_0 (`List[int]`):
281
+ List of IDs.
282
+ token_ids_1 (`List[int]`, *optional*):
283
+ Optional second list of IDs for sequence pairs.
284
+
285
+ Returns:
286
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
287
+ """
288
+ sep = [self.sep_token_id]
289
+ cls = [self.cls_token_id]
290
+ if token_ids_1 is None:
291
+ return len(cls + token_ids_0 + sep) * [0]
292
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
293
+
294
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
295
+ index = 0
296
+ if os.path.isdir(save_directory):
297
+ vocab_file = os.path.join(
298
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
299
+ )
300
+ else:
301
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
302
+ with open(vocab_file, "w", encoding="utf-8") as writer:
303
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
304
+ if index != token_index:
305
+ logger.warning(
306
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
307
+ " Please check that the vocabulary is not corrupted!"
308
+ )
309
+ index = token_index
310
+ writer.write(token + "\n")
311
+ index += 1
312
+ return (vocab_file,)
313
+
314
+
315
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
316
+ class BasicTokenizer(object):
317
+ """
318
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
319
+
320
+ Args:
321
+ do_lower_case (`bool`, *optional*, defaults to `True`):
322
+ Whether or not to lowercase the input when tokenizing.
323
+ never_split (`Iterable`, *optional*):
324
+ Collection of tokens which will never be split during tokenization. Only has an effect when
325
+ `do_basic_tokenize=True`
326
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
327
+ Whether or not to tokenize Chinese characters.
328
+
329
+ This should likely be deactivated for Japanese (see this
330
+ [issue](https://github.com/huggingface/transformers/issues/328)).
331
+ strip_accents (`bool`, *optional*):
332
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
333
+ value for `lowercase` (as in the original BERT).
334
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
335
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
336
+ the full context of the words, such as contractions.
337
+ """
338
+
339
+ def __init__(
340
+ self,
341
+ do_lower_case=True,
342
+ never_split=None,
343
+ tokenize_chinese_chars=True,
344
+ strip_accents=None,
345
+ do_split_on_punc=True,
346
+ ):
347
+ if never_split is None:
348
+ never_split = []
349
+ self.do_lower_case = do_lower_case
350
+ self.never_split = set(never_split)
351
+ self.tokenize_chinese_chars = tokenize_chinese_chars
352
+ self.strip_accents = strip_accents
353
+ self.do_split_on_punc = do_split_on_punc
354
+
355
+ def tokenize(self, text, never_split=None):
356
+ """
357
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
358
+
359
+ Args:
360
+ never_split (`List[str]`, *optional*)
361
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
362
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
363
+ """
364
+ # union() returns a new set by concatenating the two sets.
365
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
366
+ text = self._clean_text(text)
367
+
368
+ # This was added on November 1st, 2018 for the multilingual and Chinese
369
+ # models. This is also applied to the English models now, but it doesn't
370
+ # matter since the English models were not trained on any Chinese data
371
+ # and generally don't have any Chinese data in them (there are Chinese
372
+ # characters in the vocabulary because Wikipedia does have some Chinese
373
+ # words in the English Wikipedia.).
374
+ if self.tokenize_chinese_chars:
375
+ text = self._tokenize_chinese_chars(text)
376
+ # prevents treating the same character with different unicode codepoints as different characters
377
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
378
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
379
+ split_tokens = []
380
+ for token in orig_tokens:
381
+ if token not in never_split:
382
+ if self.do_lower_case:
383
+ token = token.lower()
384
+ if self.strip_accents is not False:
385
+ token = self._run_strip_accents(token)
386
+ elif self.strip_accents:
387
+ token = self._run_strip_accents(token)
388
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
389
+
390
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
391
+ return output_tokens
392
+
393
+ def _run_strip_accents(self, text):
394
+ """Strips accents from a piece of text."""
395
+ text = unicodedata.normalize("NFD", text)
396
+ output = []
397
+ for char in text:
398
+ cat = unicodedata.category(char)
399
+ if cat == "Mn":
400
+ continue
401
+ output.append(char)
402
+ return "".join(output)
403
+
404
+ def _run_split_on_punc(self, text, never_split=None):
405
+ """Splits punctuation on a piece of text."""
406
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
407
+ return [text]
408
+ chars = list(text)
409
+ i = 0
410
+ start_new_word = True
411
+ output = []
412
+ while i < len(chars):
413
+ char = chars[i]
414
+ if _is_punctuation(char):
415
+ output.append([char])
416
+ start_new_word = True
417
+ else:
418
+ if start_new_word:
419
+ output.append([])
420
+ start_new_word = False
421
+ output[-1].append(char)
422
+ i += 1
423
+
424
+ return ["".join(x) for x in output]
425
+
426
+ def _tokenize_chinese_chars(self, text):
427
+ """Adds whitespace around any CJK character."""
428
+ output = []
429
+ for char in text:
430
+ cp = ord(char)
431
+ if self._is_chinese_char(cp):
432
+ output.append(" ")
433
+ output.append(char)
434
+ output.append(" ")
435
+ else:
436
+ output.append(char)
437
+ return "".join(output)
438
+
439
+ def _is_chinese_char(self, cp):
440
+ """Checks whether CP is the codepoint of a CJK character."""
441
+ # This defines a "chinese character" as anything in the CJK Unicode block:
442
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
443
+ #
444
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
445
+ # despite its name. The modern Korean Hangul alphabet is a different block,
446
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
447
+ # space-separated words, so they are not treated specially and handled
448
+ # like the all of the other languages.
449
+ if (
450
+ (cp >= 0x4E00 and cp <= 0x9FFF)
451
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
452
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
453
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
454
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
455
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
456
+ or (cp >= 0xF900 and cp <= 0xFAFF)
457
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
458
+ ): #
459
+ return True
460
+
461
+ return False
462
+
463
+ def _clean_text(self, text):
464
+ """Performs invalid character removal and whitespace cleanup on text."""
465
+ output = []
466
+ for char in text:
467
+ cp = ord(char)
468
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
469
+ continue
470
+ if _is_whitespace(char):
471
+ output.append(" ")
472
+ else:
473
+ output.append(char)
474
+ return "".join(output)
475
+
476
+
477
+ class WordpieceTokenizer(object):
478
+ """Runs WordPiece tokenization."""
479
+
480
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
481
+ self.vocab = vocab
482
+ self.unk_token = unk_token
483
+ self.max_input_chars_per_word = max_input_chars_per_word
484
+
485
+ def tokenize(self, text):
486
+ """
487
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
488
+ tokenization using the given vocabulary.
489
+
490
+ For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.
491
+
492
+ Args:
493
+ text: A single token or whitespace separated tokens. This should have
494
+ already been passed through *BasicTokenizer*.
495
+
496
+ Returns:
497
+ A list of wordpiece tokens.
498
+ """
499
+
500
+ output_tokens = []
501
+ for token in whitespace_tokenize(text):
502
+ chars = list(token)
503
+ if len(chars) > self.max_input_chars_per_word:
504
+ output_tokens.append(self.unk_token)
505
+ continue
506
+
507
+ is_bad = False
508
+ start = 0
509
+ sub_tokens = []
510
+ while start < len(chars):
511
+ end = len(chars)
512
+ cur_substr = None
513
+ while start < end:
514
+ substr = "".join(chars[start:end])
515
+ if start > 0:
516
+ substr = "##" + substr
517
+ if substr in self.vocab:
518
+ cur_substr = substr
519
+ break
520
+ end -= 1
521
+ if cur_substr is None:
522
+ is_bad = True
523
+ break
524
+ sub_tokens.append(cur_substr)
525
+ start = end
526
+
527
+ if is_bad:
528
+ output_tokens.append(self.unk_token)
529
+ else:
530
+ output_tokens.extend(sub_tokens)
531
+ return output_tokens
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/squeezebert/tokenization_squeezebert_fast.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for SqueezeBERT."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_squeezebert import SqueezeBertTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+ PRETRAINED_VOCAB_FILES_MAP = {
32
+ "vocab_file": {
33
+ "squeezebert/squeezebert-uncased": (
34
+ "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
35
+ ),
36
+ "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
37
+ "squeezebert/squeezebert-mnli-headless": (
38
+ "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
39
+ ),
40
+ },
41
+ "tokenizer_file": {
42
+ "squeezebert/squeezebert-uncased": (
43
+ "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
44
+ ),
45
+ "squeezebert/squeezebert-mnli": (
46
+ "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
47
+ ),
48
+ "squeezebert/squeezebert-mnli-headless": (
49
+ "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
50
+ ),
51
+ },
52
+ }
53
+
54
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
55
+ "squeezebert/squeezebert-uncased": 512,
56
+ "squeezebert/squeezebert-mnli": 512,
57
+ "squeezebert/squeezebert-mnli-headless": 512,
58
+ }
59
+
60
+
61
+ PRETRAINED_INIT_CONFIGURATION = {
62
+ "squeezebert/squeezebert-uncased": {"do_lower_case": True},
63
+ "squeezebert/squeezebert-mnli": {"do_lower_case": True},
64
+ "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
65
+ }
66
+
67
+
68
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with Bert->SqueezeBert,BERT->SqueezeBERT
69
+ class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
70
+ r"""
71
+ Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
72
+
73
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
74
+ refer to this superclass for more information regarding those methods.
75
+
76
+ Args:
77
+ vocab_file (`str`):
78
+ File containing the vocabulary.
79
+ do_lower_case (`bool`, *optional*, defaults to `True`):
80
+ Whether or not to lowercase the input when tokenizing.
81
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
82
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
83
+ token instead.
84
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
85
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
86
+ sequence classification or for a text and a question for question answering. It is also used as the last
87
+ token of a sequence built with special tokens.
88
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
89
+ The token used for padding, for example when batching sequences of different lengths.
90
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
91
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
92
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
93
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
94
+ The token used for masking values. This is the token used when training this model with masked language
95
+ modeling. This is the token which the model will try to predict.
96
+ clean_text (`bool`, *optional*, defaults to `True`):
97
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
98
+ whitespaces by the classic one.
99
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
100
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
101
+ issue](https://github.com/huggingface/transformers/issues/328)).
102
+ strip_accents (`bool`, *optional*):
103
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
104
+ value for `lowercase` (as in the original SqueezeBERT).
105
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
106
+ The prefix for subwords.
107
+ """
108
+
109
+ vocab_files_names = VOCAB_FILES_NAMES
110
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
111
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
112
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
113
+ slow_tokenizer_class = SqueezeBertTokenizer
114
+
115
+ def __init__(
116
+ self,
117
+ vocab_file=None,
118
+ tokenizer_file=None,
119
+ do_lower_case=True,
120
+ unk_token="[UNK]",
121
+ sep_token="[SEP]",
122
+ pad_token="[PAD]",
123
+ cls_token="[CLS]",
124
+ mask_token="[MASK]",
125
+ tokenize_chinese_chars=True,
126
+ strip_accents=None,
127
+ **kwargs,
128
+ ):
129
+ super().__init__(
130
+ vocab_file,
131
+ tokenizer_file=tokenizer_file,
132
+ do_lower_case=do_lower_case,
133
+ unk_token=unk_token,
134
+ sep_token=sep_token,
135
+ pad_token=pad_token,
136
+ cls_token=cls_token,
137
+ mask_token=mask_token,
138
+ tokenize_chinese_chars=tokenize_chinese_chars,
139
+ strip_accents=strip_accents,
140
+ **kwargs,
141
+ )
142
+
143
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
144
+ if (
145
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
146
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
147
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
148
+ ):
149
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
150
+ normalizer_state["lowercase"] = do_lower_case
151
+ normalizer_state["strip_accents"] = strip_accents
152
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
153
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
154
+
155
+ self.do_lower_case = do_lower_case
156
+
157
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
158
+ """
159
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
160
+ adding special tokens. A SqueezeBERT sequence has the following format:
161
+
162
+ - single sequence: `[CLS] X [SEP]`
163
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
164
+
165
+ Args:
166
+ token_ids_0 (`List[int]`):
167
+ List of IDs to which the special tokens will be added.
168
+ token_ids_1 (`List[int]`, *optional*):
169
+ Optional second list of IDs for sequence pairs.
170
+
171
+ Returns:
172
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
173
+ """
174
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
175
+
176
+ if token_ids_1 is not None:
177
+ output += token_ids_1 + [self.sep_token_id]
178
+
179
+ return output
180
+
181
+ def create_token_type_ids_from_sequences(
182
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
183
+ ) -> List[int]:
184
+ """
185
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence
186
+ pair mask has the following format:
187
+
188
+ ```
189
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
190
+ | first sequence | second sequence |
191
+ ```
192
+
193
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
194
+
195
+ Args:
196
+ token_ids_0 (`List[int]`):
197
+ List of IDs.
198
+ token_ids_1 (`List[int]`, *optional*):
199
+ Optional second list of IDs for sequence pairs.
200
+
201
+ Returns:
202
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
203
+ """
204
+ sep = [self.sep_token_id]
205
+ cls = [self.cls_token_id]
206
+ if token_ids_1 is None:
207
+ return len(cls + token_ids_0 + sep) * [0]
208
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
209
+
210
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
211
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
212
+ return tuple(files)
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__init__.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_vilt": ["VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViltConfig"]}
20
+
21
+ try:
22
+ if not is_vision_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["feature_extraction_vilt"] = ["ViltFeatureExtractor"]
28
+ _import_structure["image_processing_vilt"] = ["ViltImageProcessor"]
29
+ _import_structure["processing_vilt"] = ["ViltProcessor"]
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_vilt"] = [
38
+ "VILT_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "ViltForImageAndTextRetrieval",
40
+ "ViltForImagesAndTextClassification",
41
+ "ViltForTokenClassification",
42
+ "ViltForMaskedLM",
43
+ "ViltForQuestionAnswering",
44
+ "ViltLayer",
45
+ "ViltModel",
46
+ "ViltPreTrainedModel",
47
+ ]
48
+
49
+
50
+ if TYPE_CHECKING:
51
+ from .configuration_vilt import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViltConfig
52
+
53
+ try:
54
+ if not is_vision_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .feature_extraction_vilt import ViltFeatureExtractor
60
+ from .image_processing_vilt import ViltImageProcessor
61
+ from .processing_vilt import ViltProcessor
62
+
63
+ try:
64
+ if not is_torch_available():
65
+ raise OptionalDependencyNotAvailable()
66
+ except OptionalDependencyNotAvailable:
67
+ pass
68
+ else:
69
+ from .modeling_vilt import (
70
+ VILT_PRETRAINED_MODEL_ARCHIVE_LIST,
71
+ ViltForImageAndTextRetrieval,
72
+ ViltForImagesAndTextClassification,
73
+ ViltForMaskedLM,
74
+ ViltForQuestionAnswering,
75
+ ViltForTokenClassification,
76
+ ViltLayer,
77
+ ViltModel,
78
+ ViltPreTrainedModel,
79
+ )
80
+
81
+
82
+ else:
83
+ import sys
84
+
85
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/configuration_vilt.cpython-310.pyc ADDED
Binary file (6.07 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/convert_vilt_original_to_pytorch.cpython-310.pyc ADDED
Binary file (8.52 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/feature_extraction_vilt.cpython-310.pyc ADDED
Binary file (991 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/processing_vilt.cpython-310.pyc ADDED
Binary file (4.98 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/configuration_vilt.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ VilT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ VILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "dandelin/vilt-b32-mlm": "https://huggingface.co/dandelin/vilt-b32-mlm/blob/main/config.json"
25
+ }
26
+
27
+
28
class ViltConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ViltModel`]. It is used to instantiate a ViLT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the ViLT
    [dandelin/vilt-b32-mlm](https://huggingface.co/dandelin/vilt-b32-mlm) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the text part of the model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`ViltModel`].
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`ViltModel`]. This is used when encoding
            text.
        modality_type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the modalities passed when calling [`ViltModel`]. This is used after concatenating
            the embeddings of the text and image modalities.
        max_position_embeddings (`int`, *optional*, defaults to 40):
            The maximum sequence length that this model might ever be used with.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 384):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        max_image_length (`int`, *optional*, defaults to -1):
            The maximum number of patches to take as input for the Transformer encoder. If set to a positive integer,
            the encoder will sample `max_image_length` patches at maximum. If set to -1, will not be taken into
            account.
        num_images (`int`, *optional*, defaults to -1):
            The number of images to use for natural language visual reasoning. If set to a positive integer, will be
            used by [`ViltForImagesAndTextClassification`] for defining the classifier head.

    Example:

    ```python
    >>> from transformers import ViltModel, ViltConfig

    >>> # Initializing a ViLT dandelin/vilt-b32-mlm style configuration
    >>> configuration = ViltConfig()

    >>> # Initializing a model from the dandelin/vilt-b32-mlm style configuration
    >>> model = ViltModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "vilt"

    def __init__(
        self,
        vocab_size=30522,
        type_vocab_size=2,
        modality_type_vocab_size=2,
        max_position_embeddings=40,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=32,
        num_channels=3,
        qkv_bias=True,
        max_image_length=-1,
        tie_word_embeddings=False,
        num_images=-1,
        **kwargs,
    ):
        # Forward `tie_word_embeddings` explicitly so the base config records it.
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)

        # Text-side embedding hyperparameters.
        self.vocab_size = vocab_size
        self.type_vocab_size = type_vocab_size
        self.modality_type_vocab_size = modality_type_vocab_size
        self.max_position_embeddings = max_position_embeddings

        # Transformer encoder hyperparameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Image-side (patch embedding) hyperparameters.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.max_image_length = max_image_length
        self.num_images = num_images
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/convert_vilt_original_to_pytorch.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ViLT checkpoints from the original Github repository."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+
27
+ from transformers import (
28
+ BertTokenizer,
29
+ ViltConfig,
30
+ ViltForImageAndTextRetrieval,
31
+ ViltForImagesAndTextClassification,
32
+ ViltForMaskedLM,
33
+ ViltForQuestionAnswering,
34
+ ViltImageProcessor,
35
+ ViltProcessor,
36
+ )
37
+ from transformers.utils import logging
38
+
39
+
40
+ logging.set_verbosity_info()
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ # here we list all keys to be renamed (original name on the left, our name on the right)
45
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """
    Build the list of (original key, HuggingFace key) pairs used to rename the
    original ViLT state dict. Order matches the original listing exactly.
    """
    # Per-encoder-layer (source suffix, destination suffix) pairs: output projection,
    # the two feedforward linear layers, and the two layernorms.
    per_layer_pairs = [
        ("norm1.weight", "layernorm_before.weight"),
        ("norm1.bias", "layernorm_before.bias"),
        ("attn.proj.weight", "attention.output.dense.weight"),
        ("attn.proj.bias", "attention.output.dense.bias"),
        ("norm2.weight", "layernorm_after.weight"),
        ("norm2.bias", "layernorm_after.bias"),
        ("mlp.fc1.weight", "intermediate.dense.weight"),
        ("mlp.fc1.bias", "intermediate.dense.bias"),
        ("mlp.fc2.weight", "output.dense.weight"),
        ("mlp.fc2.bias", "output.dense.bias"),
    ]
    rename_keys = [
        (f"transformer.blocks.{layer}.{src}", f"vilt.encoder.layer.{layer}.{dst}")
        for layer in range(config.num_hidden_layers)
        for src, dst in per_layer_pairs
    ]

    # Text, patch and token-type embeddings.
    rename_keys += [
        ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
        (
            "text_embeddings.position_embeddings.weight",
            "vilt.embeddings.text_embeddings.position_embeddings.weight",
        ),
        ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
        (
            "text_embeddings.token_type_embeddings.weight",
            "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
        ),
        ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
        ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
        ("transformer.cls_token", "vilt.embeddings.cls_token"),
        ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
        ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
        ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
        ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
    ]

    # Final layernorm and pooler.
    rename_keys += [
        ("transformer.norm.weight", "vilt.layernorm.weight"),
        ("transformer.norm.bias", "vilt.layernorm.bias"),
        ("pooler.dense.weight", "vilt.pooler.dense.weight"),
        ("pooler.dense.bias", "vilt.pooler.dense.bias"),
    ]

    # Task-specific classification head; the original heads use Sequential
    # indices 0, 1 and 3 (2 is an activation with no parameters).
    if vqa_model:
        head_prefix = "vqa_classifier"
    elif nlvr_model:
        head_prefix = "nlvr2_classifier"
    else:
        head_prefix = None
    if head_prefix is not None:
        rename_keys += [
            (f"{head_prefix}.{idx}.{kind}", f"classifier.{idx}.{kind}")
            for idx in (0, 1, 3)
            for kind in ("weight", "bias")
        ]

    return rename_keys
131
+
132
+
133
+ # we split up the matrix of each encoder layer into queries, keys and values
134
def read_in_q_k_v(state_dict, config):
    """
    Split each layer's fused qkv projection (a single matrix + bias, timm-style)
    into separate query/key/value entries. Mutates ``state_dict`` in place.
    """
    hidden = config.hidden_size
    for layer in range(config.num_hidden_layers):
        # Pop the fused input projection; rows are stacked as [query; key; value].
        fused_weight = state_dict.pop(f"transformer.blocks.{layer}.attn.qkv.weight")
        fused_bias = state_dict.pop(f"transformer.blocks.{layer}.attn.qkv.bias")
        target = f"vilt.encoder.layer.{layer}.attention.attention"
        # Insert query, key and value (in that order) as separate tensors.
        for idx, name in enumerate(("query", "key", "value")):
            state_dict[f"{target}.{name}.weight"] = fused_weight[idx * hidden : (idx + 1) * hidden, :]
            state_dict[f"{target}.{name}.bias"] = fused_bias[idx * hidden : (idx + 1) * hidden]
155
+
156
+
157
def remove_classification_head_(state_dict):
    """Drop the original classification head parameters (if present) in place."""
    for key in ("head.weight", "head.bias"):
        state_dict.pop(key, None)
161
+
162
+
163
def rename_key(dct, old, new):
    """Move the value stored under ``old`` to key ``new``, in place."""
    dct[new] = dct.pop(old)
166
+
167
+
168
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our ViLT structure.

    Args:
        checkpoint_url: URL of the original ViLT checkpoint. The target head is inferred
            from the URL ("vqa", "nlvr", "irtr" or "mlm_itm").
        pytorch_dump_folder_path: Directory where the converted model and processor are saved.

    Raises:
        ValueError: If the checkpoint URL matches no known model type.
    """

    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        # NLVR2 uses a third modality embedding for the second image.
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        # The original ITM head is not part of the converted MLM/IRTR models.
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        # The decoder bias is (re)initialized by the HF model, so it is expected missing.
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        # NOTE(review): both example images point to the same URL (ex0_0.jpg);
        # presumably fine for a shape/sanity check — confirm if exact logits matter.
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        # Bug fix: logits are 2D ([batch, num_labels]); the previous extra assert
        # `outputs.logits[0, 0, :3]` used three indices and would raise IndexError.
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    # Bug fix: create missing parent directories too instead of failing.
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
284
+
285
+
286
+ if __name__ == "__main__":
287
+ parser = argparse.ArgumentParser()
288
+ # Required parameters
289
+ parser.add_argument(
290
+ "--checkpoint_url",
291
+ default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
292
+ type=str,
293
+ help="URL of the checkpoint you'd like to convert.",
294
+ )
295
+ parser.add_argument(
296
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
297
+ )
298
+
299
+ args = parser.parse_args()
300
+ convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/feature_extraction_vilt.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for ViLT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_vilt import ViltImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
class ViltFeatureExtractor(ViltImageProcessor):
    """Deprecated alias of `ViltImageProcessor`, kept for backwards compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation notice, then delegate everything to the image processor.
        message = (
            "The class ViltFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use ViltImageProcessor instead."
        )
        warnings.warn(message, FutureWarning)
        super().__init__(*args, **kwargs)
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/image_processing_vilt.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Vilt."""
16
+
17
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import PaddingMode, pad, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ IMAGENET_STANDARD_MEAN,
25
+ IMAGENET_STANDARD_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ get_image_size,
30
+ infer_channel_dimension_format,
31
+ is_scaled_image,
32
+ make_list_of_images,
33
+ to_numpy_array,
34
+ valid_images,
35
+ )
36
+ from ...utils import TensorType, is_vision_available, logging
37
+
38
+
39
+ if is_vision_available():
40
+ import PIL
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+
46
def max_across_indices(values: Iterable[Any]) -> List[Any]:
    """
    Columnwise maximum: the i-th element of the result is the maximum of the
    i-th elements across all the input sequences.
    """
    return list(map(max, zip(*values)))
51
+
52
+
53
def make_pixel_mask(
    image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> np.ndarray:
    """
    Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.

    Args:
        image (`np.ndarray`):
            Image to make the pixel mask for.
        output_size (`Tuple[int, int]`):
            Output size of the mask.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the input image; inferred when not given.
    """
    valid_height, valid_width = get_image_size(image, channel_dim=input_data_format)
    # Start from an all-padding mask, then mark the region covered by the image.
    mask = np.zeros(output_size, dtype=np.int64)
    mask[:valid_height, :valid_width] = 1
    return mask
69
+
70
+
71
def get_max_height_width(
    images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> List[int]:
    """
    Get the maximum height and width across all images in a batch.
    """
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(images[0])

    shapes = [img.shape for img in images]
    # Drop the channel axis from the per-axis maxima, keeping (height, width).
    if input_data_format == ChannelDimension.FIRST:
        height_width = max_across_indices(shapes)[1:]
    elif input_data_format == ChannelDimension.LAST:
        height_width = max_across_indices(shapes)[:2]
    else:
        raise ValueError(f"Invalid channel dimension format: {input_data_format}")
    return tuple(height_width)
87
+
88
+
89
def get_resize_output_image_size(
    input_image: np.ndarray,
    shorter: int = 800,
    longer: int = 1333,
    size_divisor: int = 32,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> Tuple[int, int]:
    """
    Compute the (height, width) obtained by scaling the shorter side to ``shorter``,
    capping the longer side at ``longer``, and snapping both down to multiples of
    ``size_divisor``.
    """
    height, width = get_image_size(input_image, input_data_format)
    min_size, max_size = shorter, longer

    scale = min_size / min(height, width)

    # Pin the shorter side exactly to min_size; scale the other proportionally.
    if height < width:
        new_height, new_width = min_size, scale * width
    else:
        new_height, new_width = scale * height, min_size

    # If the longer side overshoots, rescale both dims to fit within max_size.
    if max(new_height, new_width) > max_size:
        cap = max_size / max(new_height, new_width)
        new_height, new_width = cap * new_height, cap * new_width

    # Round to nearest integer, then snap down to a multiple of size_divisor.
    new_height, new_width = int(new_height + 0.5), int(new_width + 0.5)
    new_height = new_height // size_divisor * size_divisor
    new_width = new_width // size_divisor * size_divisor

    return new_height, new_width
118
+
119
+
120
+ class ViltImageProcessor(BaseImageProcessor):
121
+ r"""
122
+ Constructs a ViLT image processor.
123
+
124
+ Args:
125
+ do_resize (`bool`, *optional*, defaults to `True`):
126
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
127
+ `do_resize` parameter in the `preprocess` method.
128
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
129
+ Resize the shorter side of the input to `size["shortest_edge"]`. The longer side will be limited to under
130
+ `int((1333 / 800) * size["shortest_edge"])` while preserving the aspect ratio. Only has an effect if
131
+ `do_resize` is set to `True`. Can be overridden by the `size` parameter in the `preprocess` method.
132
+ size_divisor (`int`, *optional*, defaults to 32):
133
+ The size by which to make sure both the height and width can be divided. Only has an effect if `do_resize`
134
+ is set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method.
135
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
136
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
137
+ overridden by the `resample` parameter in the `preprocess` method.
138
+ do_rescale (`bool`, *optional*, defaults to `True`):
139
+ Wwhether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
140
+ `do_rescale` parameter in the `preprocess` method.
141
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
142
+ Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
143
+ overridden by the `rescale_factor` parameter in the `preprocess` method.
144
+ do_normalize (`bool`, *optional*, defaults to `True`):
145
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
146
+ method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
147
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
148
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
149
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
150
+ overridden by the `image_mean` parameter in the `preprocess` method.
151
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
152
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
153
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
154
+ Can be overridden by the `image_std` parameter in the `preprocess` method.
155
+ do_pad (`bool`, *optional*, defaults to `True`):
156
+ Whether to pad the image to the `(max_height, max_width)` of the images in the batch. Can be overridden by
157
+ the `do_pad` parameter in the `preprocess` method.
158
+ """
159
+
160
+ model_input_names = ["pixel_values"]
161
+
162
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: bool = True,
        **kwargs,
    ) -> None:
        # Backwards compatibility: older configs stored the padding flag under
        # `pad_and_return_pixel_mask` instead of `do_pad`.
        if "pad_and_return_pixel_mask" in kwargs:
            do_pad = kwargs.pop("pad_and_return_pixel_mask")

        super().__init__(**kwargs)
        # Default: resize the shortest edge to 384 pixels, preserving aspect ratio.
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        self.size_divisor = size_divisor
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to the ImageNet standard statistics when none are provided.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.do_pad = do_pad
+
194
    @classmethod
    def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
        """
        Overrides the `from_dict` method from the base class to make sure `pad_and_return_pixel_mask` is updated if
        image processor is created using from_dict and kwargs e.g. `ViltImageProcessor.from_pretrained(checkpoint,
        pad_and_return_pixel_mask=False)`
        """
        # Copy so the caller's dict is not mutated.
        image_processor_dict = image_processor_dict.copy()
        if "pad_and_return_pixel_mask" in kwargs:
            image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
        return super().from_dict(image_processor_dict, **kwargs)
205
+
206
+ def resize(
207
+ self,
208
+ image: np.ndarray,
209
+ size: Dict[str, int],
210
+ size_divisor: int = 32,
211
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
212
+ data_format: Optional[Union[str, ChannelDimension]] = None,
213
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
214
+ **kwargs,
215
+ ) -> np.ndarray:
216
+ """
217
+ Resize an image.
218
+
219
+ Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the
220
+ longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then
221
+ resized to the max size while preserving the aspect ratio.
222
+
223
+ Args:
224
+ image (`np.ndarray`):
225
+ Image to resize.
226
+ size (`Dict[str, int]`):
227
+ Controls the size of the output image. Should be of the form `{"shortest_edge": int}`.
228
+ size_divisor (`int`, defaults to 32):
229
+ The image is resized to a size that is a multiple of this value.
230
+ resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.BICUBIC`):
231
+ Resampling filter to use when resiizing the image.
232
+ data_format (`str` or `ChannelDimension`, *optional*):
233
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
234
+ input_data_format (`str` or `ChannelDimension`, *optional*):
235
+ The channel dimension format of the input image. If not provided, it will be inferred.
236
+ """
237
+ size = get_size_dict(size, default_to_square=False)
238
+ if "shortest_edge" not in size:
239
+ raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
240
+ shorter = size["shortest_edge"]
241
+ longer = int(1333 / 800 * shorter)
242
+ output_size = get_resize_output_image_size(
243
+ image, shorter=shorter, longer=longer, size_divisor=size_divisor, input_data_format=input_data_format
244
+ )
245
+ return resize(
246
+ image,
247
+ size=output_size,
248
+ resample=resample,
249
+ data_format=data_format,
250
+ input_data_format=input_data_format,
251
+ **kwargs,
252
+ )
253
+
254
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
255
+ def _pad_image(
256
+ self,
257
+ image: np.ndarray,
258
+ output_size: Tuple[int, int],
259
+ constant_values: Union[float, Iterable[float]] = 0,
260
+ data_format: Optional[ChannelDimension] = None,
261
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
262
+ ) -> np.ndarray:
263
+ """
264
+ Pad an image with zeros to the given size.
265
+ """
266
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
267
+ output_height, output_width = output_size
268
+
269
+ pad_bottom = output_height - input_height
270
+ pad_right = output_width - input_width
271
+ padding = ((0, pad_bottom), (0, pad_right))
272
+ padded_image = pad(
273
+ image,
274
+ padding,
275
+ mode=PaddingMode.CONSTANT,
276
+ constant_values=constant_values,
277
+ data_format=data_format,
278
+ input_data_format=input_data_format,
279
+ )
280
+ return padded_image
281
+
282
    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
    def pad(
        self,
        images: List[np.ndarray],
        constant_values: Union[float, Iterable[float]] = 0,
        return_pixel_mask: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> BatchFeature:
        """
        Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
        in the batch and optionally returns their corresponding pixel mask.

        Args:
            images (`List[np.ndarray]`):
                Batch of images to pad.
            constant_values (`float` or `Iterable[float]`, *optional*):
                The value to use for the padding if `mode` is `"constant"`.
            return_pixel_mask (`bool`, *optional*, defaults to `True`):
                Whether to return a pixel mask.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        # Target size is the max height/width across the whole batch.
        pad_size = get_max_height_width(images, input_data_format=input_data_format)

        padded_images = [
            self._pad_image(
                image,
                pad_size,
                constant_values=constant_values,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for image in images
        ]
        data = {"pixel_values": padded_images}

        if return_pixel_mask:
            # 1 marks real pixels, 0 marks the padded region (per make_pixel_mask's output_size contract).
            masks = [
                make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
                for image in images
            ]
            data["pixel_mask"] = masks

        return BatchFeature(data=data, tensor_type=return_tensors)
337
+
338
+ def preprocess(
339
+ self,
340
+ images: ImageInput,
341
+ do_resize: Optional[bool] = None,
342
+ size: Optional[Dict[str, int]] = None,
343
+ size_divisor: Optional[int] = None,
344
+ resample: PILImageResampling = None,
345
+ do_rescale: Optional[bool] = None,
346
+ rescale_factor: Optional[float] = None,
347
+ do_normalize: Optional[bool] = None,
348
+ image_mean: Optional[Union[float, List[float]]] = None,
349
+ image_std: Optional[Union[float, List[float]]] = None,
350
+ do_pad: Optional[bool] = None,
351
+ return_tensors: Optional[Union[str, TensorType]] = None,
352
+ data_format: ChannelDimension = ChannelDimension.FIRST,
353
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
354
+ **kwargs,
355
+ ) -> PIL.Image.Image:
356
+ """
357
+ Preprocess an image or batch of images.
358
+
359
+ Args:
360
+ images (`ImageInput`):
361
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
362
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
363
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
364
+ Whether to resize the image.
365
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
366
+ Controls the size of the image after `resize`. The shortest edge of the image is resized to
367
+ `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
368
+ is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
369
+ edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
370
+ size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
371
+ The image is resized to a size that is a multiple of this value.
372
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
373
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
374
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
375
+ Whether to rescale the image values between [0 - 1].
376
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
377
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
378
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
379
+ Whether to normalize the image.
380
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
381
+ Image mean to normalize the image by if `do_normalize` is set to `True`.
382
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
383
+ Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
384
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
385
+ Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also
386
+ created and returned.
387
+ return_tensors (`str` or `TensorType`, *optional*):
388
+ The type of tensors to return. Can be one of:
389
+ - Unset: Return a list of `np.ndarray`.
390
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
391
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
392
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
393
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
394
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
395
+ The channel dimension format for the output image. Can be one of:
396
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
397
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
398
+ input_data_format (`ChannelDimension` or `str`, *optional*):
399
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
400
+ from the input image. Can be one of:
401
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
402
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
403
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
404
+ """
405
+ do_resize = do_resize if do_resize is not None else self.do_resize
406
+ size_divisor = size_divisor if size_divisor is not None else self.size_divisor
407
+ resample = resample if resample is not None else self.resample
408
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
409
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
410
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
411
+ image_mean = image_mean if image_mean is not None else self.image_mean
412
+ image_std = image_std if image_std is not None else self.image_std
413
+ do_pad = do_pad if do_pad is not None else self.do_pad
414
+
415
+ size = size if size is not None else self.size
416
+ size = get_size_dict(size, default_to_square=False)
417
+
418
+ images = make_list_of_images(images)
419
+
420
+ if not valid_images(images):
421
+ raise ValueError(
422
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
423
+ "torch.Tensor, tf.Tensor or jax.ndarray."
424
+ )
425
+
426
+ if do_resize and size is None or resample is None:
427
+ raise ValueError("Size and resample must be specified if do_resize is True.")
428
+
429
+ if do_rescale and rescale_factor is None:
430
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
431
+
432
+ if do_normalize and (image_mean is None or image_std is None):
433
+ raise ValueError("Image mean and std must be specified if do_normalize is True.")
434
+
435
+ # All transformations expect numpy arrays.
436
+ images = [to_numpy_array(image) for image in images]
437
+
438
+ if is_scaled_image(images[0]) and do_rescale:
439
+ logger.warning_once(
440
+ "It looks like you are trying to rescale already rescaled images. If the input"
441
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
442
+ )
443
+
444
+ if input_data_format is None:
445
+ # We assume that all images have the same channel dimension format.
446
+ input_data_format = infer_channel_dimension_format(images[0])
447
+
448
+ if do_resize:
449
+ images = [
450
+ self.resize(
451
+ image=image,
452
+ size=size,
453
+ size_divisor=size_divisor,
454
+ resample=resample,
455
+ input_data_format=input_data_format,
456
+ )
457
+ for image in images
458
+ ]
459
+
460
+ if do_rescale:
461
+ images = [
462
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
463
+ for image in images
464
+ ]
465
+
466
+ if do_normalize:
467
+ images = [
468
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
469
+ for image in images
470
+ ]
471
+
472
+ images = [
473
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
474
+ ]
475
+
476
+ if do_pad:
477
+ encoded_outputs = self.pad(
478
+ images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format
479
+ )
480
+ else:
481
+ encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
482
+
483
+ return encoded_outputs
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/modeling_vilt.py ADDED
@@ -0,0 +1,1489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 NAVER AI Labs and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ViLT model."""
16
+
17
+ import collections.abc
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ BaseModelOutputWithPooling,
31
+ MaskedLMOutput,
32
+ ModelOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...pytorch_utils import (
38
+ find_pruneable_heads_and_indices,
39
+ meshgrid,
40
+ prune_linear_layer,
41
+ )
42
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
43
+ from .configuration_vilt import ViltConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ _CONFIG_FOR_DOC = "ViltConfig"
49
+ _CHECKPOINT_FOR_DOC = "dandelin/vilt-b32-mlm"
50
+
51
+ VILT_PRETRAINED_MODEL_ARCHIVE_LIST = [
52
+ "dandelin/vilt-b32-mlm",
53
+ # See all ViLT models at https://huggingface.co/models?filter=vilt
54
+ ]
55
+
56
+
57
@dataclass
class ViltForImagesAndTextClassificationOutput(ModelOutput):
    """
    Class for outputs of [`ViltForImagesAndTextClassification`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`List[tuple(torch.FloatTensor)]`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            List of tuples of `torch.FloatTensor` (one for each image-text pair, each tuple containing the output of
            the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`List[tuple(torch.FloatTensor)]`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            List of tuples of `torch.FloatTensor` (one for each image-text pair, each tuple containing the attention
            weights of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the
            attention softmax, used to compute the weighted average in the self-attention heads.
    """

    # Note: `hidden_states`/`attentions` are *lists* with one entry per image-text pair (see docstring),
    # unlike the single-tuple fields on most ModelOutput subclasses.
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[List[Tuple[torch.FloatTensor]]] = None
    attentions: Optional[List[Tuple[torch.FloatTensor]]] = None
81
+
82
+
83
class ViltEmbeddings(nn.Module):
    """
    Construct the text and patch embeddings.

    Text embeddings are equivalent to BERT embeddings.

    Patch embeddings are equivalent to ViT embeddings.
    """

    def __init__(self, config):
        super().__init__()

        # text embeddings
        self.text_embeddings = TextEmbeddings(config)
        # patch embeddings
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = ViltPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        # +1 position slot for the prepended [CLS] token.
        self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
        # modality type (text/patch) embeddings
        self.token_type_embeddings = nn.Embedding(config.modality_type_vocab_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def visual_embed(self, pixel_values, pixel_mask, max_image_length=200):
        # Embed image patches, interpolate position embeddings to each image's valid patch area,
        # then subsample/pad every image's patch sequence to a common length (`max_image_length`).
        # NOTE(review): the subsampling below uses torch.multinomial, so this path is stochastic.
        _, _, ph, pw = self.patch_embeddings.projection.weight.shape  # (ph, pw) are unused here

        x = self.patch_embeddings(pixel_values)
        # Downsample the pixel-level mask to the patch grid so each patch is marked valid (1) or padding (0).
        x_mask = pixel_mask[:, None, :, :].float()
        x_mask = nn.functional.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
        # Valid patch-rows / patch-columns per image, read from the first column/row of the mask
        # (assumes padding sits on the bottom/right only — TODO confirm against the image processor).
        x_h = x_mask[:, 0].sum(dim=1)[:, 0]
        x_w = x_mask[:, 0].sum(dim=2)[:, 0]

        batch_size, num_channels, height, width = x.shape
        patch_dim = self.config.image_size // self.config.patch_size
        # Reshape the learned (flat) position embeddings back onto the training-time patch grid...
        spatial_pos = self.position_embeddings[:, 1:, :].transpose(1, 2).view(1, num_channels, patch_dim, patch_dim)
        # ...then bilinearly resample them per image to its valid (h, w) patch area and zero-pad
        # out to the full batch grid so everything can be concatenated along the batch dim.
        pos_embed = torch.cat(
            [
                nn.functional.pad(
                    nn.functional.interpolate(
                        spatial_pos,
                        size=(h, w),
                        mode="bilinear",
                        align_corners=True,
                    ),
                    (0, width - w, 0, height - h),
                )
                for h, w in zip(x_h, x_w)
            ],
            dim=0,
        )

        pos_embed = pos_embed.flatten(2).transpose(1, 2)
        x = x.flatten(2).transpose(1, 2)
        # Set `device` here, otherwise `patch_index` will always be on `CPU` and will fail near the end for torch>=1.13
        patch_index = torch.stack(
            meshgrid(torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1]), indexing="ij"), dim=-1
        ).to(device=x_mask.device)
        patch_index = patch_index[None, None, :, :, :]
        patch_index = patch_index.expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
        patch_index = patch_index.flatten(1, 3)
        x_mask = x_mask.flatten(1)

        if max_image_length < 0 or max_image_length is None or not isinstance(max_image_length, int):
            # suppose aug is 800 x 1333, then, maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and be shrinked)
            # (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
            # if self.patch_size = 32, 25 * 41 = 1025
            # if res is 384 x 640, 12 * 20 = 240
            effective_resolution = x_h * x_w
            max_image_length = effective_resolution.max()
        else:
            effective_resolution = x_h * x_w
            max_image_length = min(effective_resolution.max(), max_image_length)

        # Split flattened patch positions into valid vs padding positions, grouped per image (row index).
        valid_idx = x_mask.nonzero(as_tuple=False)
        non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
        unique_rows = valid_idx[:, 0].unique()
        valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
        non_valid_row_idx = [non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows]

        valid_nums = [v.size(0) for v in valid_row_idx]
        non_valid_nums = [v.size(0) for v in non_valid_row_idx]
        pad_nums = [max_image_length - v for v in valid_nums]

        select = []
        for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
            if p <= 0:
                # Too many valid patches: randomly subsample down to max_image_length.
                valid_choice = torch.multinomial(torch.ones(v).float(), max_image_length)
                select.append(valid_row_idx[i][valid_choice])
            else:
                # Too few valid patches: keep all of them and randomly draw padding positions to fill up.
                pad_choice = torch.multinomial(torch.ones(nv).float(), p, replacement=True)
                select.append(torch.cat([valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0))

        select = torch.cat(select, dim=0)
        x = x[select[:, 0], select[:, 1]].view(batch_size, -1, num_channels)
        x_mask = x_mask[select[:, 0], select[:, 1]].view(batch_size, -1)
        # `patch_index` should be on the same device as `select` (for torch>=1.13), which is ensured at definition time.
        patch_index = patch_index[select[:, 0], select[:, 1]].view(batch_size, -1, 2)
        pos_embed = pos_embed[select[:, 0], select[:, 1]].view(batch_size, -1, num_channels)

        # Prepend the [CLS] token (and its dedicated position embedding, index 0) to every sequence.
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        pos_embed = torch.cat(
            (self.position_embeddings[:, 0, :][:, None, :].expand(batch_size, -1, -1), pos_embed), dim=1
        )
        x = x + pos_embed
        x = self.dropout(x)

        # The [CLS] slot is always marked valid in the mask.
        x_mask = torch.cat([torch.ones(x_mask.shape[0], 1).to(x_mask), x_mask], dim=1)

        return x, x_mask, (patch_index, (height, width))

    def forward(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        pixel_values,
        pixel_mask,
        inputs_embeds,
        image_embeds,
        image_token_type_idx=1,
    ):
        # Returns (embeddings, masks): text and image embeddings concatenated along the sequence dim,
        # plus the matching concatenated attention mask.
        # PART 1: text embeddings
        text_embeds = self.text_embeddings(
            input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )

        # PART 2: patch embeddings (with interpolated position encodings)
        if image_embeds is None:
            image_embeds, image_masks, patch_index = self.visual_embed(
                pixel_values, pixel_mask, max_image_length=self.config.max_image_length
            )
        else:
            image_masks = pixel_mask.flatten(1)

        # PART 3: add modality type embeddings
        # 0 indicates text, 1 indicates image, 2 is optionally used when a second image is provided (NLVR2)
        if image_token_type_idx is None:
            image_token_type_idx = 1
        text_embeds = text_embeds + self.token_type_embeddings(
            torch.zeros_like(attention_mask, dtype=torch.long, device=text_embeds.device)
        )
        image_embeds = image_embeds + self.token_type_embeddings(
            torch.full_like(image_masks, image_token_type_idx, dtype=torch.long, device=text_embeds.device)
        )

        # PART 4: concatenate
        embeddings = torch.cat([text_embeds, image_embeds], dim=1)
        masks = torch.cat([attention_mask, image_masks], dim=1)

        return embeddings, masks
235
+
236
+
237
class TextEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        """Sum word, token-type and (optionally) absolute position embeddings, then LayerNorm + dropout."""
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_len = shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_len]

        # When token_type_ids are not passed, fall back to the all-zeros buffer registered in the
        # constructor; this keeps tracing working without explicit token_type_ids (issue #5664).
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                token_type_ids = self.token_type_ids[:, :seq_len].expand(shape[0], seq_len)
            else:
                token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            embeddings = embeddings + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(embeddings))
292
+
293
+
294
class ViltPatchEmbeddings(nn.Module):
    """
    Image to Patch Embedding.

    Splits an image into non-overlapping patches and projects each patch to `hidden_size` channels
    with a strided convolution.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        # Accept either a single int or an explicit (height, width) pair for both sizes.
        if not isinstance(image_size, collections.abc.Iterable):
            image_size = (image_size, image_size)
        if not isinstance(patch_size, collections.abc.Iterable):
            patch_size = (patch_size, patch_size)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values):
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        return self.projection(pixel_values)
322
+
323
+
324
class ViltSelfAttention(nn.Module):
    """Multi-head self-attention over the concatenated text + image token sequence."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            # Bug fix: the original f-string had a stray comma (`{config.hidden_size,}`) which rendered
            # the hidden size as a 1-tuple, e.g. "The hidden size (768,) is not a multiple ...".
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # Reshape (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size).
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
        """
        Returns a 1-tuple `(context,)` or, with `output_attentions=True`, `(context, attention_probs)`.
        """
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # Merge the heads back: (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
382
+
383
+
384
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Vilt
class ViltSelfOutput(nn.Module):
    """
    The residual connection is defined in ViltLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: ViltConfig) -> None:
        super().__init__()
        # Output projection back to hidden_size, followed by dropout; no residual add here.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is part of the shared interface but intentionally unused
        # (the residual connection lives in ViltLayer).
        projected = self.dense(hidden_states)
        return self.dropout(projected)
401
+
402
+
403
class ViltAttention(nn.Module):
    """Bundles self-attention with its output projection and supports head pruning."""

    def __init__(self, config):
        super().__init__()
        self.attention = ViltSelfAttention(config)
        self.output = ViltSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Physically remove the given attention heads from the Q/K/V/output projections."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Shrink each projection so the pruned rows/columns disappear.
        for name in ("query", "key", "value"):
            setattr(self.attention, name, prune_linear_layer(getattr(self.attention, name), index))
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Keep the head bookkeeping consistent with the resized layers.
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
        self_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        # Forward the attention probabilities when they were requested.
        return (attention_output,) + self_outputs[1:]
435
+
436
+
437
# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->Vilt
class ViltIntermediate(nn.Module):
    """First half of the transformer MLP: expand to `intermediate_size` and apply the activation."""

    def __init__(self, config: ViltConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a string key into ACT2FN or a callable supplied directly.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
452
+
453
+
454
# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->Vilt
class ViltOutput(nn.Module):
    """Second half of the transformer MLP: project back to `hidden_size`, dropout, add the residual."""

    def __init__(self, config: ViltConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection with the block input.
        return projected + input_tensor
468
+
469
+
470
class ViltLayer(nn.Module):
    """This corresponds to the Block class in the timm implementation: a pre-norm
    attention sub-block and a pre-norm MLP sub-block, each with a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ViltAttention(config)
        self.intermediate = ViltIntermediate(config)
        self.output = ViltOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
        # in ViLT, layernorm is applied before self-attention
        normed_input = self.layernorm_before(hidden_states)
        attn_results = self.attention(normed_input, attention_mask, head_mask, output_attentions=output_attentions)
        attention_output = attn_results[0]
        extras = attn_results[1:]  # attention probabilities, when requested

        # first residual connection (`.to(...)` keeps the residual on the right device
        # when the model is split across devices)
        hidden_states = attention_output + hidden_states.to(attention_output.device)

        # in ViLT, layernorm is also applied after self-attention; the second residual
        # connection is added inside self.output
        mlp_hidden = self.intermediate(self.layernorm_after(hidden_states))
        layer_output = self.output(mlp_hidden, hidden_states)

        return (layer_output,) + extras
506
+
507
+
508
class ViltEncoder(nn.Module):
    # Stack of `num_hidden_layers` ViltLayer blocks, optionally run under gradient checkpointing.
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ViltLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (PreTrainedModel.gradient_checkpointing_enable); trades compute for memory.
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """
        Run the embedded sequence through every transformer layer.

        Args:
            hidden_states: embedding output, shape (batch, seq_len, hidden_size).
            attention_mask: extended additive attention mask, or None.
            head_mask: per-layer head masks, indexed as `head_mask[i]`, or None.
            output_attentions: if True, collect each layer's attention probabilities.
            output_hidden_states: if True, collect the hidden states before every layer and after the last.
            return_dict: if True return a `BaseModelOutput`, else a tuple of the non-None values.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the state *entering* this layer.
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                # Recompute the layer's activations during backward instead of storing them.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            # Also record the final state after the last layer.
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
559
+
560
+
561
class ViltPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ViltConfig
    base_model_prefix = "vilt"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ViltEmbeddings", "ViltSelfAttention"]

    def _init_weights(self, module):
        """Initialize the weights"""
        std = self.config.initializer_range
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # The padding embedding stays at zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
587
+
588
+
589
# Shared docstring fragment injected by the `@add_start_docstrings` decorators on the model classes below.
VILT_START_DOCSTRING = r"""
    This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ subclass. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ViltConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
599
+
600
# Forward-signature docstring for single-image models; `{0}` is filled in with the input shape
# by `@add_start_docstrings_to_model_forward`.
VILT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)

        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)

        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ViltImageProcessor.__call__`] for details.

        pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:

            - 1 for pixels that are real (i.e. **not masked**),
            - 0 for pixels that are padding (i.e. **masked**).
            `What are attention masks? <../glossary.html#attention-mask>`__

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

        image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
            Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `pixel_values` into patch embeddings.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
654
+
655
# Variant of the inputs docstring for multi-image tasks: pixel inputs carry an extra
# `num_images` dimension. `{0}` is filled in by `@add_start_docstrings_to_model_forward`.
VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)

        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)

        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ViltImageProcessor.__call__`] for details.

        pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*):
            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:

            - 1 for pixels that are real (i.e. **not masked**),
            - 0 for pixels that are padding (i.e. **masked**).
            `What are attention masks? <../glossary.html#attention-mask>`__

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

        image_embeds (`torch.FloatTensor` of shape `(batch_size, num_images, num_patches, hidden_size)`, *optional*):
            Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `pixel_values` into patch embeddings.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
709
+
710
+
711
@add_start_docstrings(
    "The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.",
    VILT_START_DOCSTRING,
)
class ViltModel(ViltPreTrainedModel):
    # Joint vision-and-language transformer: ViltEmbeddings fuses text tokens and image
    # patches into a single sequence, which is processed by one shared ViltEncoder.
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = ViltEmbeddings(config)
        self.encoder = ViltEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # The pooler is optional so task heads that only need sequence states can skip it.
        self.pooler = ViltPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Word-embedding table lives in the text sub-embeddings.
        return self.embeddings.text_embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.text_embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        image_embeds: Optional[torch.FloatTensor] = None,
        image_token_type_idx: Optional[int] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutputWithPooling, Tuple[torch.FloatTensor]]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import ViltProcessor, ViltModel
        >>> from PIL import Image
        >>> import requests

        >>> # prepare image and text
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "hello world"

        >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
        >>> model = ViltModel.from_pretrained("dandelin/vilt-b32-mlm")

        >>> inputs = processor(image, text, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        # Fall back to config defaults for the output-format flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Text inputs: exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        text_batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Default text attention mask: attend to every token.
        if attention_mask is None:
            attention_mask = torch.ones(((text_batch_size, seq_length)), device=device)

        # Image inputs: exactly one of pixel_values / image_embeds must be provided.
        if pixel_values is not None and image_embeds is not None:
            raise ValueError("You cannot specify both pixel_values and image_embeds at the same time")
        elif pixel_values is None and image_embeds is None:
            raise ValueError("You have to specify either pixel_values or image_embeds")

        image_batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeds.shape[0]
        if image_batch_size != text_batch_size:
            raise ValueError("The text inputs and image inputs need to have the same batch size")
        # Default pixel mask: every pixel is real.
        if pixel_mask is None:
            pixel_mask = torch.ones((image_batch_size, self.config.image_size, self.config.image_size), device=device)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # Fuse text and image into one sequence; the embeddings module also returns the
        # combined (text + image) attention mask.
        embedding_output, attention_mask = self.embeddings(
            input_ids,
            attention_mask,
            token_type_ids,
            pixel_values,
            pixel_mask,
            inputs_embeds,
            image_embeds,
            image_token_type_idx=image_token_type_idx,
        )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # Final layernorm on the encoder output, then optional [CLS] pooling.
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
858
+
859
+
860
class ViltPooler(nn.Module):
    """Pools the sequence by passing the first ([CLS]) token through a dense layer with tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
873
+
874
+
875
@add_start_docstrings(
    """
    ViLT Model with a language modeling head on top as done during pretraining.
    """,
    VILT_START_DOCSTRING,
)
class ViltForMaskedLM(ViltPreTrainedModel):
    # MLM decoder weights/bias are tied to the input word embeddings.
    _tied_weights_keys = ["mlm_score.decoder.weight", "mlm_score.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.vilt = ViltModel(config)
        self.mlm_score = ViltMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.mlm_score.decoder

    def set_output_embeddings(self, new_embeddings):
        self.mlm_score.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        image_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]:
        r"""
        labels (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in *[-100, 0, ...,
            config.vocab_size]* (see *input_ids* docstring) Tokens with indices set to *-100* are ignored (masked), the
            loss is only computed for the tokens with labels in *[0, ..., config.vocab_size]*

        Returns:

        Examples:

        ```python
        >>> from transformers import ViltProcessor, ViltForMaskedLM
        >>> import requests
        >>> from PIL import Image
        >>> import re
        >>> import torch

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "a bunch of [MASK] laying on a [MASK]."

        >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
        >>> model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm")

        >>> # prepare inputs
        >>> encoding = processor(image, text, return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**encoding)

        >>> tl = len(re.findall("\[MASK\]", text))
        >>> inferred_token = [text]

        >>> # gradually fill in the MASK tokens, one by one
        >>> with torch.no_grad():
        ...     for i in range(tl):
        ...         encoded = processor.tokenizer(inferred_token)
        ...         input_ids = torch.tensor(encoded.input_ids)
        ...         encoded = encoded["input_ids"][0][1:-1]
        ...         outputs = model(input_ids=input_ids, pixel_values=encoding.pixel_values)
        ...         mlm_logits = outputs.logits[0]  # shape (seq_len, vocab_size)
        ...         # only take into account text features (minus CLS and SEP token)
        ...         mlm_logits = mlm_logits[1 : input_ids.shape[1] - 1, :]
        ...         mlm_values, mlm_ids = mlm_logits.softmax(dim=-1).max(dim=-1)
        ...         # only take into account text
        ...         mlm_values[torch.tensor(encoded) != 103] = 0
        ...         select = mlm_values.argmax().item()
        ...         encoded[select] = mlm_ids[select].item()
        ...         inferred_token = [processor.decode(encoded)]

        >>> selected_token = ""
        >>> encoded = processor.tokenizer(inferred_token)
        >>> output = processor.decode(encoded.input_ids[0], skip_special_tokens=True)
        >>> print(output)
        a bunch of cats laying on a couch.
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.vilt(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            pixel_values=pixel_values,
            pixel_mask=pixel_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            image_embeds=image_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]
        # split up final hidden states into text and image features
        # (text tokens come first in the fused sequence produced by ViltEmbeddings)
        text_seq_len = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        text_features, _ = (sequence_output[:, :text_seq_len], sequence_output[:, text_seq_len:])

        # MLM logits are computed over the text positions only.
        mlm_logits = self.mlm_score(text_features)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            # move labels to correct device to enable PP
            labels = labels.to(mlm_logits.device)
            masked_lm_loss = loss_fct(mlm_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (mlm_logits,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=mlm_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1012
+
1013
+
1014
class ViltPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the MLM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may be a string key into ACT2FN or a callable supplied directly.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
1029
+
1030
+
1031
class ViltMLMHead(nn.Module):
    """Masked-language-modeling head: transform text features, then decode to vocabulary logits."""

    def __init__(self, config, weight=None):
        super().__init__()
        self.config = config
        self.transform = ViltPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        if weight is not None:
            # Allow tying the decoder weight to the input word embeddings.
            self.decoder.weight = weight

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, x):
        transformed = self.transform(x)
        return self.decoder(transformed)
1048
+
1049
+
1050
@add_start_docstrings(
    """
    Vilt Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the [CLS]
    token) for visual question answering, e.g. for VQAv2.
    """,
    VILT_START_DOCSTRING,
)
class ViltForQuestionAnswering(ViltPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vilt = ViltModel(config)

        # Classifier head: a two-layer MLP over the pooled [CLS] state, widened to
        # 2 * hidden_size before projecting onto the answer vocabulary (num_labels).
        self.classifier = nn.Sequential(
            nn.Linear(config.hidden_size, config.hidden_size * 2),
            nn.LayerNorm(config.hidden_size * 2),
            nn.GELU(),
            nn.Linear(config.hidden_size * 2, config.num_labels),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        image_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
        r"""
        labels (`torch.FloatTensor` of shape `(batch_size, num_labels)`, *optional*):
            Labels for computing the visual question answering loss. This tensor must be either a one-hot encoding of
            all answers that are applicable for a given example in the batch, or a soft encoding indicating which
            answers are applicable, where 1.0 is the highest score.

        Returns:

        Examples:

        ```python
        >>> from transformers import ViltProcessor, ViltForQuestionAnswering
        >>> import requests
        >>> from PIL import Image

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "How many cats are there?"

        >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
        >>> model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

        >>> # prepare inputs
        >>> encoding = processor(image, text, return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**encoding)
        >>> logits = outputs.logits
        >>> idx = logits.argmax(-1).item()
        >>> print("Predicted answer:", model.config.id2label[idx])
        Predicted answer: 2
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the shared text+image encoder; all kwargs are forwarded unchanged.
        outputs = self.vilt(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            pixel_values=pixel_values,
            pixel_mask=pixel_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            image_embeds=image_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pooled [CLS] representation: attribute access on the ModelOutput, or
        # position 1 of the plain tuple when return_dict=False.
        pooler_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooler_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable PP
            labels = labels.to(logits.device)
            # VQA is framed as multi-label (soft-target) classification. BCE averages
            # over all elements, so scaling by the number of answer classes recovers a
            # per-example sum over answers.
            loss = nn.functional.binary_cross_entropy_with_logits(logits, labels) * labels.shape[1]
            # see https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19

        if not return_dict:
            # Tuple output: (loss?, logits, hidden_states?, attentions?); index 2+
            # of the encoder tuple carries hidden states / attentions if requested.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1161
+
1162
+
1163
@add_start_docstrings(
    """
    Vilt Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the [CLS]
    token) for image-to-text or text-to-image retrieval, e.g. MSCOCO and F30K.
    """,
    VILT_START_DOCSTRING,
)
class ViltForImageAndTextRetrieval(ViltPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.vilt = ViltModel(config)

        # Classifier head: a single logit scoring how well the (image, text) pair matches.
        self.rank_output = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        image_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels are currently not supported.

        Returns:

        Examples:

        ```python
        >>> from transformers import ViltProcessor, ViltForImageAndTextRetrieval
        >>> import requests
        >>> from PIL import Image

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"]

        >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-coco")
        >>> model = ViltForImageAndTextRetrieval.from_pretrained("dandelin/vilt-b32-finetuned-coco")

        >>> # forward pass
        >>> scores = dict()
        >>> for text in texts:
        ...     # prepare inputs
        ...     encoding = processor(image, text, return_tensors="pt")
        ...     outputs = model(**encoding)
        ...     scores[text] = outputs.logits[0, :].item()
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Encode the (image, text) pair with the shared ViLT backbone.
        outputs = self.vilt(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            pixel_values=pixel_values,
            pixel_mask=pixel_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            image_embeds=image_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pooled [CLS] representation (tuple position 1 when return_dict=False).
        pooler_output = outputs.pooler_output if return_dict else outputs[1]

        # One matching score per pair: shape (batch_size, 1).
        logits = self.rank_output(pooler_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable PP
            labels = labels.to(logits.device)
            # Inference-only head: passing labels is explicitly rejected rather than
            # silently ignored, so callers notice that no loss is computed.
            raise NotImplementedError("Training is not yet supported.")

        if not return_dict:
            # Tuple output: (loss?, logits, hidden_states?, attentions?). Note that
            # `loss` is always None here since training raises above.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1263
+
1264
+
1265
@add_start_docstrings(
    """
    Vilt Model transformer with a classifier head on top for natural language visual reasoning, e.g. NLVR2.
    """,
    VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING,
)
class ViltForImagesAndTextClassification(ViltPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vilt = ViltModel(config)

        # Classifier head: operates on the concatenation of the per-image pooled
        # [CLS] states, hence the `hidden_size * num_images` input width.
        num_images = config.num_images
        self.classifier = nn.Sequential(
            nn.Linear(config.hidden_size * num_images, config.hidden_size * num_images),
            nn.LayerNorm(config.hidden_size * num_images),
            nn.GELU(),
            nn.Linear(config.hidden_size * num_images, config.num_labels),
        )

        # Initialize weights and apply final processing
        self.post_init()

    # NOTE(review): the class-level decorator above uses the images-and-text inputs
    # docstring while this one uses the generic VILT_INPUTS_DOCSTRING — confirm
    # which variant is intended for the forward signature documentation.
    @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ViltForImagesAndTextClassificationOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        image_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[ViltForImagesAndTextClassificationOutput, Tuple[torch.FloatTensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Binary classification labels.

        Returns:

        Examples:

        ```python
        >>> from transformers import ViltProcessor, ViltForImagesAndTextClassification
        >>> import requests
        >>> from PIL import Image

        >>> image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        >>> image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_1.jpg", stream=True).raw)
        >>> text = "The left image contains twice the number of dogs as the right image."

        >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2")
        >>> model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2")

        >>> # prepare inputs
        >>> encoding = processor([image1, image2], text, return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(input_ids=encoding.input_ids, pixel_values=encoding.pixel_values.unsqueeze(0))
        >>> logits = outputs.logits
        >>> idx = logits.argmax(-1).item()
        >>> print("Predicted answer:", model.config.id2label[idx])
        Predicted answer: True
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is not None and pixel_values.ndim == 4:
            # add dummy num_images dimension
            pixel_values = pixel_values.unsqueeze(1)

        if image_embeds is not None and image_embeds.ndim == 3:
            # add dummy num_images dimension
            image_embeds = image_embeds.unsqueeze(1)

        # Derive the per-example image count from pixel_values, falling back to
        # image_embeds; it must match what the classifier was sized for.
        num_images = pixel_values.shape[1] if pixel_values is not None else None
        if num_images is None:
            num_images = image_embeds.shape[1] if image_embeds is not None else None
        if num_images != self.config.num_images:
            raise ValueError(
                "Make sure to match the number of images in the model with the number of images in the input."
            )
        pooler_outputs = []
        hidden_states = [] if output_hidden_states else None
        attentions = [] if output_attentions else None
        for i in range(num_images):
            # forward every image through the model
            # Each image gets a distinct image token type id (i + 1) so the
            # backbone can tell the images apart.
            outputs = self.vilt(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                pixel_values=pixel_values[:, i, :, :, :] if pixel_values is not None else None,
                pixel_mask=pixel_mask[:, i, :, :] if pixel_mask is not None else None,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                image_embeds=image_embeds[:, i, :, :] if image_embeds is not None else None,
                image_token_type_idx=i + 1,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            pooler_output = outputs.pooler_output if return_dict else outputs[1]
            pooler_outputs.append(pooler_output)
            if output_hidden_states:
                hidden_states.append(outputs.hidden_states)
            if output_attentions:
                attentions.append(outputs.attentions)

        # Concatenate per-image pooled states along the feature dimension to form
        # the classifier input of width hidden_size * num_images.
        pooled_output = torch.cat(pooler_outputs, dim=-1)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # move labels to correct device to enable PP
            labels = labels.to(logits.device)
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            # Unlike the other heads, hidden_states/attentions are Python lists
            # (one entry per image) and are always included in the tuple.
            output = (logits, hidden_states, attentions)
            return ((loss,) + output) if loss is not None else output

        return ViltForImagesAndTextClassificationOutput(
            loss=loss,
            logits=logits,
            hidden_states=hidden_states,
            attentions=attentions,
        )
1404
+
1405
+
1406
@add_start_docstrings(
    """
    ViLT Model with a token classification head on top (a linear layer on top of the final hidden-states of the text
    tokens) e.g. for Named-Entity-Recognition (NER) tasks.
    """,
    VILT_START_DOCSTRING,
)
class ViltForTokenClassification(ViltPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        # No pooling layer: per-token states are classified directly.
        self.vilt = ViltModel(config, add_pooling_layer=False)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        image_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.vilt(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            pixel_values=pixel_values,
            pixel_mask=pixel_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            image_embeds=image_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Full multimodal sequence of hidden states (tuple position 0).
        sequence_output = outputs[0]

        # Number of text positions in the input; only these are classified.
        text_input_size = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        sequence_output = self.dropout(sequence_output)
        # Classify only the leading `text_input_size` positions of the sequence
        # (presumably the text tokens precede image patches — confirm in ViltModel).
        logits = self.classifier(sequence_output[:, :text_input_size])

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # move labels to correct device to enable PP
            labels = labels.to(logits.device)
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
evalkit_cambrian/lib/python3.10/site-packages/transformers/models/vilt/processing_vilt.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for ViLT.
17
+ """
18
+
19
+ import warnings
20
+ from typing import List, Optional, Union
21
+
22
+ from ...processing_utils import ProcessorMixin
23
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
24
+ from ...utils import TensorType
25
+
26
+
27
class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and ViLT image processor into a single processor.

    [`ViltProcessor`] offers all the functionalities of [`ViltImageProcessor`] and [`BertTokenizerFast`]. See the
    docstring of [`~ViltProcessor.__call__`] and [`~ViltProcessor.decode`] for more information.

    Args:
        image_processor (`ViltImageProcessor`, *optional*):
            An instance of [`ViltImageProcessor`]. The image processor is a required input.
        tokenizer (`BertTokenizerFast`, *optional*):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Backward-compat shim: accept the legacy `feature_extractor` kwarg as an
        # alias for `image_processor`, with a deprecation warning.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        This method uses [`ViltImageProcessor.__call__`] method to prepare image(s) for the model, and
        [`BertTokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information.
        """
        # Tokenize the text first; all tokenizer kwargs are forwarded verbatim.
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        # Merge the image-processor outputs into the text encoding so the caller
        # receives a single BatchEncoding.
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, deduplicated while
        # preserving order (dict.fromkeys keeps first occurrence).
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility; use `image_processor_class`.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility; use `image_processor`.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
evalkit_tf446/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.10 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca5c6f81d584906b4d32b984ad8704dd65bf75bdab4334ed22ce7eef7501a95a
3
+ size 279161544
evalkit_tf449/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44a813aa2da08830f9083f81d0eb73f1ae4052a4d9b0b0de480a8f6cd9eb3078
3
+ size 441938896
infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/Index.svelte ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<script context="module">export { default as BaseChatBot } from "./shared/ChatBot.svelte";
</script>

<script>import ChatBot from "./shared/ChatBot.svelte";
import { Block, BlockLabel } from "@gradio/atoms";
import { Chat } from "@gradio/icons";
import { StatusTracker } from "@gradio/statustracker";
import { normalise_tuples, normalise_messages } from "./shared/utils";
// NOTE: compiled dist output of the Gradio Chatbot Index component — edit the
// corresponding source .svelte file, not this artifact.
export let elem_id = "";
export let elem_classes = [];
export let visible = true;
export let value = [];
export let scale = null;
export let min_width = void 0;
export let label;
export let show_label = true;
export let root;
export let _selectable = false;
export let likeable = false;
export let feedback_options = ["Like", "Dislike"];
export let feedback_value = null;
export let show_share_button = false;
export let rtl = false;
export let show_copy_button = true;
export let show_copy_all_button = false;
export let sanitize_html = true;
export let layout = "bubble";
export let type = "tuples";
export let render_markdown = true;
export let line_breaks = true;
export let autoscroll = true;
export let _retryable = false;
export let _undoable = false;
export let group_consecutive_messages = true;
export let latex_delimiters;
export let gradio;
let _value = [];
// Reactively normalise the raw history into the shared message shape, based on
// whether the chatbot is configured for "tuples" or "messages" format.
$:
_value = type === "tuples" ? normalise_tuples(value, root) : normalise_messages(value, root);
export let avatar_images = [null, null];
export let like_user_message = false;
export let loading_status = void 0;
export let height;
export let resizeable;
export let min_height;
export let max_height;
export let editable = null;
export let placeholder = null;
export let examples = null;
export let theme_mode;
export let allow_file_downloads = true;
</script>

<!-- Block wrapper hosts the status tracker, optional label, and the ChatBot;
     all UI events are re-dispatched through the `gradio` event bus. -->
<Block
	{elem_id}
	{elem_classes}
	{visible}
	padding={false}
	{scale}
	{min_width}
	{height}
	{resizeable}
	{min_height}
	{max_height}
	allow_overflow={true}
	flex={true}
	overflow_behavior="auto"
>
	{#if loading_status}
		<StatusTracker
			autoscroll={gradio.autoscroll}
			i18n={gradio.i18n}
			{...loading_status}
			show_progress={loading_status.show_progress === "hidden"
				? "hidden"
				: "minimal"}
			on:clear_status={() => gradio.dispatch("clear_status", loading_status)}
		/>
	{/if}
	<div class="wrapper">
		{#if show_label}
			<BlockLabel
				{show_label}
				Icon={Chat}
				float={true}
				label={label || "Chatbot"}
			/>
		{/if}
		<ChatBot
			i18n={gradio.i18n}
			selectable={_selectable}
			{likeable}
			{feedback_options}
			{feedback_value}
			{show_share_button}
			{show_copy_all_button}
			value={_value}
			{latex_delimiters}
			display_consecutive_in_same_bubble={group_consecutive_messages}
			{render_markdown}
			{theme_mode}
			{editable}
			pending_message={loading_status?.status === "pending"}
			generating={loading_status?.status === "generating"}
			{rtl}
			{show_copy_button}
			{like_user_message}
			on:change={() => gradio.dispatch("change", value)}
			on:select={(e) => gradio.dispatch("select", e.detail)}
			on:like={(e) => gradio.dispatch("like", e.detail)}
			on:share={(e) => gradio.dispatch("share", e.detail)}
			on:error={(e) => gradio.dispatch("error", e.detail)}
			on:example_select={(e) => gradio.dispatch("example_select", e.detail)}
			on:option_select={(e) => gradio.dispatch("option_select", e.detail)}
			on:retry={(e) => gradio.dispatch("retry", e.detail)}
			on:undo={(e) => gradio.dispatch("undo", e.detail)}
			on:clear={() => {
				value = [];
				gradio.dispatch("clear");
			}}
			on:copy={(e) => gradio.dispatch("copy", e.detail)}
			on:edit={(e) => {
				if (value === null || value.length === 0) return;
				if (type === "messages") {
					//@ts-ignore
					value[e.detail.index].content = e.detail.value;
				} else {
					//@ts-ignore
					value[e.detail.index[0]][e.detail.index[1]] = e.detail.value;
				}
				value = value;
				gradio.dispatch("edit", e.detail);
			}}
			{avatar_images}
			{sanitize_html}
			{line_breaks}
			{autoscroll}
			{layout}
			{placeholder}
			{examples}
			{_retryable}
			{_undoable}
			upload={(...args) => gradio.client.upload(...args)}
			_fetch={(...args) => gradio.client.fetch(...args)}
			load_component={gradio.load_component}
			msg_format={type}
			root={gradio.root}
			{allow_file_downloads}
		/>
	</div>
</Block>

<style>
	/* Column layout that fills the Block and lets the chat area grow. */
	.wrapper {
		display: flex;
		position: relative;
		flex-direction: column;
		align-items: start;
		width: 100%;
		height: 100%;
		flex-grow: 1;
	}

	:global(.progress-text) {
		right: auto;
	}
</style>
infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/CopyAll.svelte.d.ts ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { SvelteComponent } from "svelte";
import type { NormalisedMessage } from "../types";
/** Machine-generated prop/event/slot typings for the CopyAll.svelte component. */
declare const __propDef: {
    props: {
        /** Chat history the component serialises; `null` when the chatbot is empty. */
        value: NormalisedMessage[] | null;
    };
    events: {
        [evt: string]: CustomEvent<any>;
    };
    slots: {};
};
export type CopyAllProps = typeof __propDef.props;
export type CopyAllEvents = typeof __propDef.events;
export type CopyAllSlots = typeof __propDef.slots;
export default class CopyAll extends SvelteComponent<CopyAllProps, CopyAllEvents, CopyAllSlots> {
}
export {};
infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/FlagActive.svelte.d.ts ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/** @typedef {typeof __propDef.props} FlagActiveProps */
/** @typedef {typeof __propDef.events} FlagActiveEvents */
/** @typedef {typeof __propDef.slots} FlagActiveSlots */
/** Machine-generated declarations for FlagActive.svelte; the component accepts no props or slots. */
export default class FlagActive extends SvelteComponent<{
    [x: string]: never;
}, {
    [evt: string]: CustomEvent<any>;
}, {}> {
}
export type FlagActiveProps = typeof __propDef.props;
export type FlagActiveEvents = typeof __propDef.events;
export type FlagActiveSlots = typeof __propDef.slots;
import { SvelteComponent } from "svelte";
declare const __propDef: {
    props: {
        [x: string]: never;
    };
    events: {
        [evt: string]: CustomEvent<any>;
    };
    slots: {};
};
export {};
infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/MessageBox.svelte.d.ts ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { SvelteComponent } from "svelte";
/** Machine-generated prop/event/slot typings for the MessageBox.svelte component. */
declare const __propDef: {
    props: {
        /** Whether the box starts expanded; optional. */
        expanded?: boolean | undefined;
        /** Header text of the message box. */
        title: string;
        /** Right-to-left layout toggle; optional. */
        rtl?: boolean | undefined;
    };
    events: {
        [evt: string]: CustomEvent<any>;
    };
    slots: {
        default: {};
    };
};
export type MessageBoxProps = typeof __propDef.props;
export type MessageBoxEvents = typeof __propDef.events;
export type MessageBoxSlots = typeof __propDef.slots;
export default class MessageBox extends SvelteComponent<MessageBoxProps, MessageBoxEvents, MessageBoxSlots> {
}
export {};
infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/Pending.svelte ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<script>import { Image } from "@gradio/image/shared";
// NOTE: compiled dist output of the chatbot "pending" (typing-dots) indicator —
// edit the corresponding source .svelte file, not this artifact.
export let layout = "bubble";
export let avatar_images = [null, null];
</script>

<!-- Bot-side placeholder bubble with three pulsing dots while a reply loads.
     avatar_images[1] is the bot avatar; [0] is the user avatar (spacing only). -->
<div class="container">
	{#if avatar_images[1] !== null}
		<div class="avatar-container">
			<Image class="avatar-image" src={avatar_images[1].url} alt="bot avatar" />
		</div>
	{/if}

	<div
		class="message bot pending {layout}"
		class:with_avatar={avatar_images[1] !== null}
		class:with_opposite_avatar={avatar_images[0] !== null}
		role="status"
		aria-label="Loading response"
		aria-live="polite"
	>
		<div class="message-content">
			<span class="sr-only">Loading content</span>
			<div class="dots">
				<div class="dot" />
				<div class="dot" />
				<div class="dot" />
			</div>
		</div>
	</div>
</div>

<style>
	.container {
		display: flex;
		margin: calc(var(--spacing-xl) * 2);
	}

	/* "bubble" layout: bordered chat bubble aligned to the bot side. */
	.bubble.pending {
		border-width: 1px;
		border-radius: var(--radius-lg);
		border-bottom-left-radius: 0;
		border-color: var(--border-color-primary);
		background-color: var(--background-fill-secondary);
		box-shadow: var(--shadow-drop);
		align-self: flex-start;
		width: fit-content;
		margin-bottom: var(--spacing-xl);
	}

	.bubble.with_opposite_avatar {
		margin-right: calc(var(--spacing-xxl) + 35px + var(--spacing-xxl));
	}

	/* "panel" layout: full-width, borderless variant. */
	.panel.pending {
		margin: 0;
		padding: calc(var(--spacing-lg) * 2) calc(var(--spacing-lg) * 2);
		width: 100%;
		border: none;
		background: none;
		box-shadow: none;
		border-radius: 0;
	}

	.panel.with_avatar {
		padding-left: calc(var(--spacing-xl) * 2) !important;
		padding-right: calc(var(--spacing-xl) * 2) !important;
	}

	.avatar-container {
		align-self: flex-start;
		position: relative;
		display: flex;
		justify-content: flex-start;
		align-items: flex-start;
		width: 35px;
		height: 35px;
		flex-shrink: 0;
		bottom: 0;
		border-radius: 50%;
		border: 1px solid var(--border-color-primary);
		margin-right: var(--spacing-xxl);
	}

	.message-content {
		padding: var(--spacing-sm) var(--spacing-xl);
		min-height: var(--size-8);
		display: flex;
		align-items: center;
	}

	.dots {
		display: flex;
		gap: var(--spacing-xs);
		align-items: center;
	}

	/* Staggered pulse animation: each dot lags the previous by 0.2s. */
	.dot {
		width: var(--size-1-5);
		height: var(--size-1-5);
		margin-right: var(--spacing-xs);
		border-radius: 50%;
		background-color: var(--body-text-color);
		opacity: 0.5;
		animation: pulse 1.5s infinite;
	}

	.dot:nth-child(2) {
		animation-delay: 0.2s;
	}

	.dot:nth-child(3) {
		animation-delay: 0.4s;
	}

	@keyframes pulse {
		0%,
		100% {
			opacity: 0.4;
			transform: scale(1);
		}
		50% {
			opacity: 1;
			transform: scale(1.1);
		}
	}
</style>
infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/chatbot/dist/shared/Pending.svelte.d.ts ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { SvelteComponent } from "svelte";
import type { FileData } from "@gradio/client";
/** Machine-generated prop/event/slot typings for the Pending.svelte indicator component. */
declare const __propDef: {
    props: {
        /** Chat layout variant; defaults to "bubble" in the component. */
        layout?: string | undefined;
        /** [user avatar, bot avatar]; either entry may be null. */
        avatar_images?: [FileData | null, FileData | null] | undefined;
    };
    events: {
        [evt: string]: CustomEvent<any>;
    };
    slots: {};
};
export type PendingProps = typeof __propDef.props;
export type PendingEvents = typeof __propDef.events;
export type PendingSlots = typeof __propDef.slots;
export default class Pending extends SvelteComponent<PendingProps, PendingEvents, PendingSlots> {
}
export {};