koichi12 committed
Commit 2ca8472 (verified) · Parent: 315f3e5

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +6 -0
  2. .venv/lib/python3.11/site-packages/transformers/__pycache__/cache_utils.cpython-311.pyc +3 -0
  3. .venv/lib/python3.11/site-packages/transformers/__pycache__/modeling_outputs.cpython-311.pyc +3 -0
  4. .venv/lib/python3.11/site-packages/transformers/__pycache__/modeling_tf_utils.cpython-311.pyc +3 -0
  5. .venv/lib/python3.11/site-packages/transformers/__pycache__/testing_utils.cpython-311.pyc +3 -0
  6. .venv/lib/python3.11/site-packages/transformers/__pycache__/tokenization_utils_base.cpython-311.pyc +3 -0
  7. .venv/lib/python3.11/site-packages/transformers/__pycache__/training_args.cpython-311.pyc +3 -0
  8. .venv/lib/python3.11/site-packages/transformers/models/__init__.py +304 -0
  9. .venv/lib/python3.11/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +665 -0
  10. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__init__.py +30 -0
  11. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/__init__.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/configuration_llava_onevision.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/image_processing_llava_onevision.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/modeling_llava_onevision.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/processing_llava_onevision.cpython-311.pyc +0 -0
  16. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/video_processing_llava_onevision.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/configuration_llava_onevision.py +190 -0
  18. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/image_processing_llava_onevision.py +715 -0
  19. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/modeling_llava_onevision.py +812 -0
  20. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/processing_llava_onevision.py +319 -0
  21. .venv/lib/python3.11/site-packages/transformers/models/llava_onevision/video_processing_llava_onevision.py +338 -0
  22. .venv/lib/python3.11/site-packages/transformers/models/myt5/__init__.py +26 -0
  23. .venv/lib/python3.11/site-packages/transformers/models/myt5/__pycache__/__init__.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/transformers/models/myt5/__pycache__/tokenization_myt5.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/transformers/models/myt5/tokenization_myt5.py +380 -0
  26. .venv/lib/python3.11/site-packages/transformers/models/rembert/__init__.py +30 -0
  27. .venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/__init__.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/configuration_rembert.cpython-311.pyc +0 -0
  29. .venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/modeling_rembert.cpython-311.pyc +0 -0
  30. .venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/modeling_tf_rembert.cpython-311.pyc +0 -0
  31. .venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/tokenization_rembert.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/tokenization_rembert_fast.cpython-311.pyc +0 -0
  33. .venv/lib/python3.11/site-packages/transformers/models/rembert/configuration_rembert.py +162 -0
  34. .venv/lib/python3.11/site-packages/transformers/models/rembert/modeling_rembert.py +1517 -0
  35. .venv/lib/python3.11/site-packages/transformers/models/rembert/modeling_tf_rembert.py +1721 -0
  36. .venv/lib/python3.11/site-packages/transformers/models/rembert/tokenization_rembert.py +265 -0
  37. .venv/lib/python3.11/site-packages/transformers/models/rembert/tokenization_rembert_fast.py +232 -0
  38. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__init__.py +32 -0
  39. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/__init__.cpython-311.pyc +0 -0
  40. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/configuration_wav2vec2.cpython-311.pyc +0 -0
  41. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/feature_extraction_wav2vec2.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/modeling_flax_wav2vec2.cpython-311.pyc +0 -0
  43. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/processing_wav2vec2.cpython-311.pyc +0 -0
  44. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/tokenization_wav2vec2.cpython-311.pyc +0 -0
  45. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/configuration_wav2vec2.py +347 -0
  46. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/feature_extraction_wav2vec2.py +243 -0
  47. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_flax_wav2vec2.py +1428 -0
  48. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_tf_wav2vec2.py +1858 -0
  49. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py +0 -0
  50. .venv/lib/python3.11/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py +186 -0
.gitattributes CHANGED
@@ -427,3 +427,9 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
 .venv/lib/python3.11/site-packages/transformers/generation/__pycache__/tf_utils.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
 .venv/lib/python3.11/site-packages/transformers/__pycache__/modeling_utils.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
 .venv/lib/python3.11/site-packages/transformers/__pycache__/trainer.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/transformers/__pycache__/tokenization_utils_base.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/transformers/__pycache__/cache_utils.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/transformers/__pycache__/modeling_outputs.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/transformers/__pycache__/modeling_tf_utils.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/transformers/__pycache__/training_args.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/transformers/__pycache__/testing_utils.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
.venv/lib/python3.11/site-packages/transformers/__pycache__/cache_utils.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac78e0a7f936cac1a2823c1bfc4b77a84e4387d40c6e7aa7159c3ec3c687948c
+size 117848
.venv/lib/python3.11/site-packages/transformers/__pycache__/modeling_outputs.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0523870630a2869713edfeb509a7e8b578c6d12b60f570df1980ff8967248fd2
+size 127236
.venv/lib/python3.11/site-packages/transformers/__pycache__/modeling_tf_utils.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e15e9fd16dfb7cbb991cc343b35714ca64d0a7b2e0f5c89d63a9ff6a90e90725
+size 175992
.venv/lib/python3.11/site-packages/transformers/__pycache__/testing_utils.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b493620b1fc9b0aaf24187c7ad47b18c0ff3b15e71af9535f0d7883ce85b51b4
+size 137012
.venv/lib/python3.11/site-packages/transformers/__pycache__/tokenization_utils_base.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8963a7b087496eb2d4c608e1dac4b1976dcbf7a6e85994fbc68424429714c0f
+size 206631
.venv/lib/python3.11/site-packages/transformers/__pycache__/training_args.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c36e1189b4c27edfd98a2a4a0c43c82f487c0df42bf3f2e26ea86698c5eabd15
+size 165859
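The six `.pyc` additions above are stored as Git LFS pointer files: a three-line text stub (`version`, `oid sha256:...`, `size` in bytes) that LFS swaps for the real binary at checkout. A minimal parsing sketch; the `parse_lfs_pointer` helper is hypothetical, not part of this commit, and the pointer text is copied from the first hunk above:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse the three-line Git LFS pointer format into a dict (sketch only)."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")  # each line is "<key> <value>"
        fields[key] = value
    return fields


pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:ac78e0a7f936cac1a2823c1bfc4b77a84e4387d40c6e7aa7159c3ec3c687948c
size 117848"""

parsed = parse_lfs_pointer(pointer)
print(parsed["oid"])   # sha256:ac78e0a7...
print(parsed["size"])  # 117848 (bytes of the real cache_utils .pyc)
```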
.venv/lib/python3.11/site-packages/transformers/models/__init__.py ADDED
@@ -0,0 +1,304 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import (
+    albert,
+    align,
+    altclip,
+    aria,
+    audio_spectrogram_transformer,
+    auto,
+    autoformer,
+    bamba,
+    bark,
+    bart,
+    barthez,
+    bartpho,
+    beit,
+    bert,
+    bert_generation,
+    bert_japanese,
+    bertweet,
+    big_bird,
+    bigbird_pegasus,
+    biogpt,
+    bit,
+    blenderbot,
+    blenderbot_small,
+    blip,
+    blip_2,
+    bloom,
+    bridgetower,
+    bros,
+    byt5,
+    camembert,
+    canine,
+    chameleon,
+    chinese_clip,
+    clap,
+    clip,
+    clipseg,
+    clvp,
+    code_llama,
+    codegen,
+    cohere,
+    cohere2,
+    colpali,
+    conditional_detr,
+    convbert,
+    convnext,
+    convnextv2,
+    cpm,
+    cpmant,
+    ctrl,
+    cvt,
+    dac,
+    data2vec,
+    dbrx,
+    deberta,
+    deberta_v2,
+    decision_transformer,
+    deformable_detr,
+    deit,
+    deprecated,
+    depth_anything,
+    detr,
+    dialogpt,
+    diffllama,
+    dinat,
+    dinov2,
+    dinov2_with_registers,
+    distilbert,
+    dit,
+    donut,
+    dpr,
+    dpt,
+    efficientnet,
+    electra,
+    emu3,
+    encodec,
+    encoder_decoder,
+    ernie,
+    esm,
+    falcon,
+    falcon_mamba,
+    fastspeech2_conformer,
+    flaubert,
+    flava,
+    fnet,
+    focalnet,
+    fsmt,
+    funnel,
+    fuyu,
+    gemma,
+    gemma2,
+    git,
+    glm,
+    glpn,
+    gpt2,
+    gpt_bigcode,
+    gpt_neo,
+    gpt_neox,
+    gpt_neox_japanese,
+    gpt_sw3,
+    gptj,
+    granite,
+    granitemoe,
+    grounding_dino,
+    groupvit,
+    herbert,
+    hiera,
+    hubert,
+    ibert,
+    idefics,
+    idefics2,
+    idefics3,
+    ijepa,
+    imagegpt,
+    informer,
+    instructblip,
+    instructblipvideo,
+    jamba,
+    jetmoe,
+    kosmos2,
+    layoutlm,
+    layoutlmv2,
+    layoutlmv3,
+    layoutxlm,
+    led,
+    levit,
+    lilt,
+    llama,
+    llava,
+    llava_next,
+    llava_next_video,
+    llava_onevision,
+    longformer,
+    longt5,
+    luke,
+    lxmert,
+    m2m_100,
+    mamba,
+    mamba2,
+    marian,
+    markuplm,
+    mask2former,
+    maskformer,
+    mbart,
+    mbart50,
+    megatron_bert,
+    megatron_gpt2,
+    mgp_str,
+    mimi,
+    mistral,
+    mixtral,
+    mllama,
+    mluke,
+    mobilebert,
+    mobilenet_v1,
+    mobilenet_v2,
+    mobilevit,
+    mobilevitv2,
+    modernbert,
+    moonshine,
+    moshi,
+    mpnet,
+    mpt,
+    mra,
+    mt5,
+    musicgen,
+    musicgen_melody,
+    mvp,
+    myt5,
+    nemotron,
+    nllb,
+    nllb_moe,
+    nougat,
+    nystromformer,
+    olmo,
+    olmo2,
+    olmoe,
+    omdet_turbo,
+    oneformer,
+    openai,
+    opt,
+    owlv2,
+    owlvit,
+    paligemma,
+    patchtsmixer,
+    patchtst,
+    pegasus,
+    pegasus_x,
+    perceiver,
+    persimmon,
+    phi,
+    phi3,
+    phimoe,
+    phobert,
+    pix2struct,
+    pixtral,
+    plbart,
+    poolformer,
+    pop2piano,
+    prophetnet,
+    pvt,
+    pvt_v2,
+    qwen2,
+    qwen2_audio,
+    qwen2_moe,
+    qwen2_vl,
+    rag,
+    recurrent_gemma,
+    reformer,
+    regnet,
+    rembert,
+    resnet,
+    roberta,
+    roberta_prelayernorm,
+    roc_bert,
+    roformer,
+    rt_detr,
+    rwkv,
+    sam,
+    seamless_m4t,
+    seamless_m4t_v2,
+    segformer,
+    seggpt,
+    sew,
+    sew_d,
+    siglip,
+    speech_encoder_decoder,
+    speech_to_text,
+    speecht5,
+    splinter,
+    squeezebert,
+    stablelm,
+    starcoder2,
+    superpoint,
+    swiftformer,
+    swin,
+    swin2sr,
+    swinv2,
+    switch_transformers,
+    t5,
+    table_transformer,
+    tapas,
+    textnet,
+    time_series_transformer,
+    timesformer,
+    timm_backbone,
+    timm_wrapper,
+    trocr,
+    tvp,
+    udop,
+    umt5,
+    unispeech,
+    unispeech_sat,
+    univnet,
+    upernet,
+    video_llava,
+    videomae,
+    vilt,
+    vipllava,
+    vision_encoder_decoder,
+    vision_text_dual_encoder,
+    visual_bert,
+    vit,
+    vit_mae,
+    vit_msn,
+    vitdet,
+    vitmatte,
+    vitpose,
+    vitpose_backbone,
+    vits,
+    vivit,
+    wav2vec2,
+    wav2vec2_bert,
+    wav2vec2_conformer,
+    wav2vec2_phoneme,
+    wav2vec2_with_lm,
+    wavlm,
+    whisper,
+    x_clip,
+    xglm,
+    xlm,
+    xlm_roberta,
+    xlm_roberta_xl,
+    xlnet,
+    xmod,
+    yolos,
+    yoso,
+    zamba,
+    zoedepth,
+)
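Because this `__init__.py` eagerly imports every model package in a single `from . import (...)` statement, each subpackage becomes an attribute of `transformers.models` as soon as the parent package is imported. A short usage sketch, assuming the `transformers` wheel vendored in this `.venv` is importable:

```python
# Usage sketch: every package listed above is reachable as an attribute of
# transformers.models after one import.
from transformers import models

print(models.rembert.__name__)   # transformers.models.rembert
print(models.wav2vec2.__name__)  # transformers.models.wav2vec2
```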
.venv/lib/python3.11/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py ADDED
@@ -0,0 +1,665 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Classes to support TF Encoder-Decoder architectures"""
+
+from __future__ import annotations
+
+import inspect
+import re
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...configuration_utils import PretrainedConfig
+from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
+from ...modeling_tf_utils import (
+    TFCausalLanguageModelingLoss,
+    TFModelInputType,
+    TFPreTrainedModel,
+    get_initializer,
+    keras,
+    unpack_inputs,
+)
+from ...tf_utils import shape_list
+from ...utils import (
+    ModelOutput,
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    logging,
+    replace_return_docstrings,
+)
+from ..auto.configuration_auto import AutoConfig
+from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM
+from .configuration_encoder_decoder import EncoderDecoderConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "EncoderDecoderConfig"
+
+DEPRECATION_WARNING = (
+    "Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
+    " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
+    " fine-tuning a model trained with versions prior to 4.17.0. The decoder_input_ids are now created based on the"
+    " labels; no need to pass them yourself anymore."
+)
+
+ENCODER_DECODER_START_DOCSTRING = r"""
+    This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
+    encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via the
+    [`~TFAutoModel.from_pretrained`] function and the decoder is loaded via the [`~TFAutoModelForCausalLM.from_pretrained`]
+    function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
+    generative task, like summarization.
+
+    The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
+    tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
+    Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn, Michael Matena, Yanqi
+    Zhou, Wei Li, Peter J. Liu.
+
+    After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model
+    (see the examples for more information).
+
+    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.)
+
+    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+    behavior.
+
+    Parameters:
+        config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
+            Initializing with a config file does not load the weights associated with the model, only the
+            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ENCODER_DECODER_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, where each example must have the shape `({0})`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Indices of decoder input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+
+            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+            `past_key_values`).
+
+            Provide for sequence-to-sequence training to the decoder. Indices can be obtained using
+            [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
+            details.
+        decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. A causal mask will also
+            be used by default.
+        encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
+            This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+            `last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden states at the output
+            of the last layer of the encoder. Used in the cross-attention of the decoder.
+        past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `({0})`.
+        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+            representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
+            into associated vectors than the model's internal embedding lookup matrix.
+        labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+            Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
+            ..., config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+        use_cache (`bool`, *optional*):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
+        training (`bool`, *optional*, defaults to `False`):
+            Whether or not to use the model in training mode (some modules like dropout modules have different
+            behaviors between training and evaluation).
+        kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
+
+            - Without a prefix, which will be input as `**encoder_kwargs` for the encoder forward function.
+            - With a *decoder_* prefix, which will be input as `**decoder_kwargs` for the decoder forward function.
+"""
+
+
+def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
+    if pad_token_id is None:
+        raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
+    pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
+
+    if decoder_start_token_id is None:
+        raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
+    decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
+
+    start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
+    shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+    # replace possible -100 values in labels by `pad_token_id`
+    shifted_input_ids = tf.where(
+        shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
+    )
+
+    # "Verify that `labels` has only positive values and -100"
+    assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
+
+    # Make sure the assertion op is called by wrapping the result in an identity no-op
+    with tf.control_dependencies([assert_gte0]):
+        shifted_input_ids = tf.identity(shifted_input_ids)
+
+    return shifted_input_ids
+
+
+@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
+class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
+    r"""
+    [`TFEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
+    of the base model classes of the library as encoder and another one as decoder when created with the
+    [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class
+    method for the decoder.
+    """
+
+    config_class = EncoderDecoderConfig
+    base_model_prefix = "encoder_decoder"
+    load_weight_prefix = "tf_encoder_decoder_model"
+
+    def __init__(
+        self,
+        config: Optional[PretrainedConfig] = None,
+        encoder: Optional[TFPreTrainedModel] = None,
+        decoder: Optional[TFPreTrainedModel] = None,
+    ):
+        if config is None and (encoder is None or decoder is None):
+            raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
+        if config is None:
+            config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
+        else:
+            if not isinstance(config, self.config_class):
+                raise ValueError(f"config: {config} has to be of type {self.config_class}")
+
+        if config.decoder.cross_attention_hidden_size is not None:
+            if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
+                raise ValueError(
+                    "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
+                    f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
+                    f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
+                    " `config.encoder.hidden_size`."
+                )
+
+        # initialize with config
+        super().__init__(config)
+
+        if encoder is None:
+            encoder = TFAutoModel.from_config(config.encoder, name="encoder")
+
+        if decoder is None:
+            decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder")
+
+        self.encoder = encoder
+        self.decoder = decoder
+
+        if self.encoder.config.to_dict() != self.config.encoder.to_dict():
+            logger.warning(
+                f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
+                f" {self.config.encoder}"
+            )
+        if self.decoder.config.to_dict() != self.config.decoder.to_dict():
+            logger.warning(
+                f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
+                f" {self.config.decoder}"
+            )
+
+        # make sure that the individual model's config refers to the shared config
+        # so that the updates to the config will be synced
+        self.encoder.config = self.config.encoder
+        self.decoder.config = self.config.decoder
+
+        # encoder outputs might need to be projected to different dimension for decoder
+        if (
+            self.encoder.config.hidden_size != self.decoder.config.hidden_size
+            and self.decoder.config.cross_attention_hidden_size is None
+        ):
+            self.enc_to_dec_proj = keras.layers.Dense(
+                units=self.decoder.config.hidden_size,
+                kernel_initializer=get_initializer(config.encoder.initializer_range),
+                name="enc_to_dec_proj",
+            )
+
+        if self.encoder.get_output_embeddings() is not None:
+            raise ValueError(
+                f"The encoder {self.encoder} should not have a LM Head. Please use a model without an LM Head."
+            )
+
+        decoder_signature = set(inspect.signature(self.decoder.call).parameters.keys())
+        if "encoder_hidden_states" not in decoder_signature:
+            raise ValueError(
+                "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
+                "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
+            )
+
+    def get_encoder(self):
+        return self.encoder
+
+    def get_decoder(self):
+        return self.decoder
+
+    def get_input_embeddings(self):
+        return self.encoder.get_input_embeddings()
+
+    def get_output_embeddings(self):
+        return self.decoder.get_output_embeddings()
+
+    def set_output_embeddings(self, new_embeddings):
+        return self.decoder.set_output_embeddings(new_embeddings)
+
+    def tf_to_pt_weight_rename(self, tf_weight):
+        # Matt: The TF and PT weights don't align because our TF base classes have an extra layer compared to PT models
+        # (the main model stem is in the MainLayer class). If we remove that layer, then weight names sync up as normal.
+        # However, the name of that extra layer is the name of the MainLayer in the base model. We make the assumption
+        # here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's
+        # not the case, and I wasn't sure how else to go from the config to the correct MainLayer name!
+
+        # This override is only needed in the case where we're crossloading weights from PT. However, since weights are
+        # often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file.
+        # Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it
+        # or not.
+        encoder_model_type = self.config.encoder.model_type
+        if "encoder" in tf_weight and "decoder" not in tf_weight:
+            return (re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight),)
+        else:
+            return (tf_weight,)
+
+    @classmethod
+    def from_encoder_decoder_pretrained(
+        cls,
+        encoder_pretrained_model_name_or_path: str = None,
+        decoder_pretrained_model_name_or_path: str = None,
+        *model_args,
+        **kwargs,
+    ) -> TFPreTrainedModel:
+        r"""
+        Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
+        checkpoints.
+
+        Params:
+            encoder_pretrained_model_name_or_path (`str`, *optional*):
+                Information necessary to initiate the encoder. Can be either:
+
+                - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+                - A path to a *directory* containing model weights saved using
+                  [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                - A path or url to a *pytorch index checkpoint file* (e.g., `./pt_model/`). In this case,
+                  `encoder_from_pt` should be set to `True`.
+
+            decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+                Information necessary to initiate the decoder. Can be either:
+
+                - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+                - A path to a *directory* containing model weights saved using
+                  [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                - A path or url to a *pytorch checkpoint file* (e.g., `./pt_model/`). In this case,
+                  `decoder_from_pt` should be set to `True`.
+
+            model_args (remaining positional arguments, *optional*):
+                All remaining positional arguments will be passed to the underlying model's `__init__` method.
+
+            kwargs (remaining dictionary of keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
+                `output_attentions=True`).
+
+                - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
+                - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
+                - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+                Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+        Example:
+
+        ```python
+        >>> from transformers import TFEncoderDecoderModel
+
+        >>> # initialize a bert2gpt2 from two pretrained models. Note that the cross-attention layers will be randomly initialized
+        >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "openai-community/gpt2")
+        >>> # saving model after fine-tuning
+        >>> model.save_pretrained("./bert2gpt2")
+        >>> # load fine-tuned model
+        >>> model = TFEncoderDecoderModel.from_pretrained("./bert2gpt2")
+        ```"""
+
+        kwargs_encoder = {
+            argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
+        }
+
+        kwargs_decoder = {
+            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+        }
+
+        # remove encoder, decoder kwargs from kwargs
+        for key in kwargs_encoder.keys():
+            del kwargs["encoder_" + key]
+        for key in kwargs_decoder.keys():
+            del kwargs["decoder_" + key]
+
+        # Load and initialize the encoder and decoder
+        # The distinction between encoder and decoder at the model level is made
+        # by the value of the flag `is_decoder` that we need to set correctly.
+        encoder = kwargs_encoder.pop("model", None)
+        if encoder is None:
+            if encoder_pretrained_model_name_or_path is None:
+                raise ValueError(
+                    "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has "
+                    "to be defined."
+                )
+
+            if "config" not in kwargs_encoder:
+                encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
+                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
+                    logger.info(
+                        f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
+                        "from a decoder model. Cross-attention and causal mask are disabled."
+                    )
+                    encoder_config.is_decoder = False
+                    encoder_config.add_cross_attention = False
+
+                kwargs_encoder["config"] = encoder_config
+
+            kwargs_encoder["name"] = "encoder"
+            kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix
+            encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
+
+        decoder = kwargs_decoder.pop("model", None)
+        if decoder is None:
+            if decoder_pretrained_model_name_or_path is None:
+                raise ValueError(
+                    "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
+                    "to be defined."
+                )
+
+            if "config" not in kwargs_decoder:
+                decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
+                if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
+                    logger.info(
+                        f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
+                        f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
+                        f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
+                    )
+                    decoder_config.is_decoder = True
+                    decoder_config.add_cross_attention = True
+
+                kwargs_decoder["config"] = decoder_config
+
+            if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
+                logger.warning(
+                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
+                    f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
+                    "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
+                    "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
+                    "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
+                )
+
+            kwargs_decoder["name"] = "decoder"
+            kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix
+            decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
+
+        # Make sure these 2 `keras.Model`s have fixed names so `from_pretrained` can load model weights correctly.
+        if encoder.name != "encoder":
+            raise ValueError("encoder model must be created with the name `encoder`.")
+        if decoder.name != "decoder":
+            raise ValueError("decoder model must be created with the name `decoder`.")
+
+        # instantiate config with corresponding kwargs
+        config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
+        return cls(encoder=encoder, decoder=decoder, config=config)
+
+    @unpack_inputs
+    @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+    def call(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: np.ndarray | tf.Tensor | None = None,
+        decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+        decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+        encoder_outputs: np.ndarray | tf.Tensor | None = None,
+        past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
+        inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
+        labels: np.ndarray | tf.Tensor | None = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: bool = False,
+        **kwargs,
+    ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
+        r"""
+        Returns:
+
+        Examples:
+
+        ```python
+        >>> from transformers import TFEncoderDecoderModel, BertTokenizer
+
+        >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
+        >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
+
+        >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
+
+        >>> # forward
+        >>> input_ids = tokenizer.encode(
+        ...     "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf"
+        ... )  # Batch size 1
+        >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
+
+        >>> # training
+        >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)
+        >>> loss, logits = outputs.loss, outputs.logits
+
+        >>> # save and load from pretrained
+        >>> model.save_pretrained("bert2gpt2")
+        >>> model = TFEncoderDecoderModel.from_pretrained("bert2gpt2")
+
+        >>> # generation
+        >>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id)
+        ```"""
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
+
+        kwargs_decoder = {
+            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+        }
+
+        # Let the user be responsible for the expected format.
+        if encoder_outputs is not None:
+            if return_dict and not isinstance(encoder_outputs, ModelOutput):
+                raise ValueError(
+                    "If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of "
+                    f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`."
+                )
+
+        if encoder_outputs is None:
+            encoder_inputs = {
+                "input_ids": input_ids,
+                "attention_mask": attention_mask,
+                "inputs_embeds": inputs_embeds,
+                "output_attentions": output_attentions,
+                "output_hidden_states": output_hidden_states,
+                "return_dict": return_dict,
+                "training": training,
+            }
+
+            # Add arguments to encoder from `kwargs_encoder`
+            encoder_inputs.update(kwargs_encoder)
+
+            # Handle the case where the inputs are passed as a single dict which contains `labels`.
+            # The `labels` shouldn't be passed to `self.encoder` below, because it is a base model without this
+            # parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`).
+            if "labels" in encoder_inputs:
+                labels = encoder_inputs.pop("labels")
+
+            # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
+            if "decoder_input_ids" in encoder_inputs:
+                decoder_input_ids = encoder_inputs.pop("decoder_input_ids")
+            # handle the init case where `dummy_inputs` returns a dict containing `decoder_attention_mask`.
+            if "decoder_attention_mask" in encoder_inputs:
+                decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask")
+
+            encoder_outputs = self.encoder(**encoder_inputs)
+
+        encoder_hidden_states = encoder_outputs[0]
+
+        # optionally project encoder_hidden_states
+        if (
+            self.encoder.config.hidden_size != self.decoder.config.hidden_size
+            and self.decoder.config.cross_attention_hidden_size is None
+        ):
+            encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
+
+        if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
+            decoder_input_ids = shift_tokens_right(
+                labels, self.config.pad_token_id, self.config.decoder_start_token_id
+            )
+
+        decoder_inputs = {
+            "input_ids": decoder_input_ids,
+            "attention_mask": decoder_attention_mask,
+            "encoder_hidden_states": encoder_hidden_states,
+            "encoder_attention_mask": attention_mask,
+            "inputs_embeds": decoder_inputs_embeds,
+            "output_attentions": output_attentions,
+            "output_hidden_states": output_hidden_states,
+            "use_cache": use_cache,
+            "past_key_values": past_key_values,
+            "return_dict": return_dict,
+            "training": training,
+        }
+
+        # Add arguments to decoder from `kwargs_decoder`
+        decoder_inputs.update(kwargs_decoder)
+
+        decoder_outputs = self.decoder(**decoder_inputs)
+
+        logits = decoder_outputs[0]
+
+        # Compute loss independent from decoder (as some shift the logits inside them)
+        loss = None
+        if labels is not None:
+            warnings.warn(DEPRECATION_WARNING, FutureWarning)
+            loss = self.hf_compute_loss(labels, logits)
+
+        if not return_dict:
+            past_key_values = None
+            if use_cache:
+                past_key_values = decoder_outputs[1]
+            # The starting index of the remaining elements in `decoder_outputs`
+            start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])
+
+            if not isinstance(encoder_outputs, tuple):
+                encoder_outputs = encoder_outputs.to_tuple()
+            output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs
+            output = tuple([x for x in output if x is not None])
+            return output
+
+        return TFSeq2SeqLMOutput(
+            loss=loss,
+            logits=decoder_outputs.logits,
+            past_key_values=decoder_outputs.past_key_values,
+            decoder_hidden_states=decoder_outputs.hidden_states,
+            decoder_attentions=decoder_outputs.attentions,
+            cross_attentions=decoder_outputs.cross_attentions,
+            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+            encoder_hidden_states=encoder_outputs.hidden_states,
+            encoder_attentions=encoder_outputs.attentions,
+        )
+
+    def prepare_inputs_for_generation(
+        self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
+    ):
+        decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
+        decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
+        past_key_values = decoder_inputs.get("past_key_values")
+        if past_key_values is None:
+            past_key_values = decoder_inputs.get("past")  # e.g. on TF GPT2
+        input_dict = {
+            "input_ids": None,  # needs to be passed to make Keras.layer.__call__ happy
+            "attention_mask": attention_mask,
+            "decoder_attention_mask": decoder_attention_mask,
+            "decoder_input_ids": decoder_inputs["input_ids"],
+            # TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete
+            "encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]),
+            "past_key_values": past_key_values,
+            "use_cache": use_cache,
+        }
+        return input_dict
+
+    def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
+        return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
+
+    def resize_token_embeddings(self, *args, **kwargs):
+        raise NotImplementedError(
+            "Resizing the embedding layers via the TFEncoderDecoderModel directly is not supported. Please use the"
+            " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
+            " model.decoder.resize_token_embeddings(...))"
+        )
+
+    def _reorder_cache(self, past, beam_idx):
+        # apply decoder cache reordering here
+        return self.decoder._reorder_cache(past, beam_idx)
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "enc_to_dec_proj", None) is not None:
+            with tf.name_scope(self.enc_to_dec_proj.name):
+                self.enc_to_dec_proj.build([None, None, self.encoder.config.hidden_size])
+        if getattr(self, "encoder", None) is not None:
+            with tf.name_scope(self.encoder.name):
+                self.encoder.build(None)
+        if getattr(self, "decoder", None) is not None:
+            with tf.name_scope(self.decoder.name):
+                self.decoder.build(None)
+
+
+__all__ = ["TFEncoderDecoderModel"]
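The `shift_tokens_right` helper above is what lets this model build `decoder_input_ids` from `labels` automatically: it prepends `decoder_start_token_id`, drops the last position, and maps any `-100` (ignored label) to `pad_token_id`. A worked sketch of its effect, assuming the vendored `transformers` and TensorFlow are importable; the token-id values 0/101 are illustrative only:

```python
import tensorflow as tf

from transformers.models.encoder_decoder.modeling_tf_encoder_decoder import shift_tokens_right

# Illustrative label row with -100 marking ignored (padded) positions:
labels = tf.constant([[5, 6, -100, -100]])

# Prepend the start token, drop the last position, then replace -100 with pad:
# [[5, 6, -100, -100]] -> [[101, 5, 6, -100]] -> [[101, 5, 6, 0]]
shifted = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=101)
print(shifted.numpy())  # [[101   5   6   0]]
```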
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__init__.py ADDED
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+    from .configuration_llava_onevision import *
+    from .image_processing_llava_onevision import *
+    from .modeling_llava_onevision import *
+    from .processing_llava_onevision import *
+    from .video_processing_llava_onevision import *
+else:
+    import sys
+
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
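Unlike the eager `models/__init__.py` earlier in this commit, this package defers its imports: `define_import_structure` scans the package files for exported names, and `_LazyModule` replaces the module in `sys.modules`, so submodules are only imported on first attribute access. A brief sketch of the observable behavior, assuming the vendored `transformers` is importable:

```python
# Importing the package is cheap: the object in sys.modules is a _LazyModule
# placeholder until an attribute is first accessed.
import transformers.models.llava_onevision as lo

print(type(lo).__name__)  # _LazyModule
# First attribute access triggers the real import of the defining submodule:
cfg_cls = lo.LlavaOnevisionConfig
print(cfg_cls.model_type)  # llava_onevision
```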
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (949 Bytes).
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/configuration_llava_onevision.cpython-311.pyc ADDED
Binary file (7.11 kB).
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/image_processing_llava_onevision.cpython-311.pyc ADDED
Binary file (36 kB).
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/modeling_llava_onevision.cpython-311.pyc ADDED
Binary file (43.6 kB).
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/processing_llava_onevision.cpython-311.pyc ADDED
Binary file (17.1 kB).
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/__pycache__/video_processing_llava_onevision.cpython-311.pyc ADDED
Binary file (19.3 kB).
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/configuration_llava_onevision.py ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import (
19
+ logging,
20
+ )
21
+ from ..auto import CONFIG_MAPPING, AutoConfig
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class LlavaOnevisionConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`LlavaOnevisionForConditionalGeneration`]. It is used to instantiate an
30
+ Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the [llava-hf/llava-onevision-qwen2-7b-ov-hf](https://huggingface.co/llava-hf/llava-onevision-qwen2-7b-ov-hf)
32
+ model.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`):
39
+ The config object or dictionary of the vision backbone.
40
+ text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
41
+ The config object or dictionary of the text backbone.
42
+ image_token_index (`int`, *optional*, defaults to 151646):
43
+ The image token index to encode the image prompt.
44
+ video_token_index (`int`, *optional*, defaults to 151647):
45
+ The video token index to encode the video prompt.
46
+ projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
47
+ The activation function used by the multimodal projector.
48
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`):
49
+ The feature selection strategy used to select the vision feature from the vision backbone.
50
+ Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
51
+ If `"full"`, the full vision features are used.
52
+ vision_feature_layer (`int`, *optional*, defaults to -1):
53
+ The index of the layer to select the vision feature.
54
+ vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`):
55
+ Aspect ratio used when processong image features. The default value is "anyres_max_9".
56
+ image_grid_pinpoints (`List`, *optional*):
57
+ A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
58
+ of the form `(height, width)`.
59
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
60
+ Whether the model's input and output word embeddings should be tied.
61
+ multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
62
+ Whether to use bias in the multimodal projector.
63
+
64
+ Example:
65
+
66
+ ```python
67
+ >>> from transformers import LlavaOnevisionForConditionalGeneration, LlavaOnevisionConfig, SiglipVisionConfig, Qwen2Config
68
+
69
+ >>> # Initializing a CLIP-vision config
70
+ >>> vision_config = SiglipVisionConfig()
71
+
72
+ >>> # Initializing a Llama config
73
+ >>> text_config = Qwen2Config()
74
+
75
+ >>> # Initializing a Llava-Next llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration
76
+ >>> configuration = LlavaOnevisionConfig(vision_config, text_config)
77
+
78
+ >>> # Initializing a model from the llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration
79
+ >>> model = LlavaOnevisionForConditionalGeneration(configuration)
80
+
81
+ >>> # Accessing the model configuration
82
+ >>> configuration = model.config
83
+ ```"""
84
+
85
+ model_type = "llava_onevision"
86
+ sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
87
+
88
+ def __init__(
89
+ self,
90
+ vision_config=None,
91
+ text_config=None,
92
+ image_token_index=151646,
93
+ video_token_index=151647,
94
+ projector_hidden_act="gelu",
95
+ vision_feature_select_strategy="full",
96
+ vision_feature_layer=-1,
97
+ vision_aspect_ratio="anyres_max_9",
98
+ image_grid_pinpoints=None,
99
+ tie_word_embeddings=False,
100
+ multimodal_projector_bias=True,
101
+ **kwargs,
102
+ ):
103
+ self.image_token_index = image_token_index
104
+ self.video_token_index = video_token_index
105
+ self.projector_hidden_act = projector_hidden_act
106
+ self.multimodal_projector_bias = multimodal_projector_bias
107
+
108
+ if vision_feature_select_strategy not in ["default", "full"]:
109
+ raise ValueError(
110
+ "vision_feature_select_strategy should be one of 'default', 'full'."
111
+ f"Got: {vision_feature_select_strategy}"
112
+ )
113
+
114
+ self.vision_feature_select_strategy = vision_feature_select_strategy
115
+ self.vision_feature_layer = vision_feature_layer
116
+ self.vision_aspect_ratio = vision_aspect_ratio
117
+ image_grid_pinpoints = (
118
+ image_grid_pinpoints
119
+ if image_grid_pinpoints is not None
120
+ else [
121
+ [384, 384],
122
+ [384, 768],
123
+ [384, 1152],
124
+ [384, 1536],
125
+ [384, 1920],
126
+ [384, 2304],
127
+ [768, 384],
128
+ [768, 768],
129
+ [768, 1152],
130
+ [768, 1536],
131
+ [768, 1920],
132
+ [768, 2304],
133
+ [1152, 384],
134
+ [1152, 768],
135
+ [1152, 1152],
136
+ [1152, 1536],
137
+ [1152, 1920],
138
+ [1152, 2304],
139
+ [1536, 384],
140
+ [1536, 768],
141
+ [1536, 1152],
142
+ [1536, 1536],
143
+ [1536, 1920],
144
+ [1536, 2304],
145
+ [1920, 384],
146
+ [1920, 768],
147
+ [1920, 1152],
148
+ [1920, 1536],
149
+ [1920, 1920],
150
+ [1920, 2304],
151
+ [2304, 384],
152
+ [2304, 768],
153
+ [2304, 1152],
154
+ [2304, 1536],
155
+ [2304, 1920],
156
+ [2304, 2304],
157
+ ]
158
+ )
159
+ self.image_grid_pinpoints = image_grid_pinpoints
160
+
161
+ if isinstance(vision_config, dict):
162
+ vision_config["model_type"] = (
163
+ vision_config["model_type"] if "model_type" in vision_config else "siglip_vision_model"
164
+ )
165
+ vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
166
+ elif vision_config is None:
167
+ vision_config = CONFIG_MAPPING["siglip_vision_model"](
168
+ hidden_size=1152,
169
+ intermediate_size=4304,
170
+ patch_size=14,
171
+ image_size=384,
172
+ num_hidden_layers=26,
173
+ num_attention_heads=14,
174
+ vision_use_head=False,
175
+ )
176
+
177
+ self.vision_config = vision_config
178
+
179
+ if isinstance(text_config, dict):
180
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "qwen2"
181
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
182
+ elif text_config is None:
183
+ text_config = CONFIG_MAPPING["qwen2"]()
184
+
185
+ self.text_config = text_config
186
+
187
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
188
+
189
+
190
+ __all__ = ["LlavaOnevisionConfig"]
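For illustration (not part of the upstream file): a minimal sketch of how the dict branches in `__init__` resolve sub-configs. Passing plain dicts is equivalent to passing config objects, with `model_type` falling back to `siglip_vision_model` / `qwen2` when omitted.

```python
from transformers import LlavaOnevisionConfig

# Dicts without an explicit `model_type` fall back to the defaults shown above.
config = LlavaOnevisionConfig(
    vision_config={"hidden_size": 1152, "patch_size": 14, "image_size": 384},
    text_config={"hidden_size": 3584, "num_hidden_layers": 28},
)
print(type(config.vision_config).__name__)  # SiglipVisionConfig
print(type(config.text_config).__name__)    # Qwen2Config
```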
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/image_processing_llava_onevision.py ADDED
@@ -0,0 +1,715 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for LLaVa-Onevision."""
16
+
17
+ import math
18
+ from typing import Dict, Iterable, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict, select_best_resolution
23
+ from ...image_transforms import (
24
+ PaddingMode,
25
+ convert_to_rgb,
26
+ pad,
27
+ resize,
28
+ to_channel_dimension_format,
29
+ )
30
+ from ...image_utils import (
31
+ OPENAI_CLIP_MEAN,
32
+ OPENAI_CLIP_STD,
33
+ ChannelDimension,
34
+ ImageInput,
35
+ PILImageResampling,
36
+ get_image_size,
37
+ infer_channel_dimension_format,
38
+ is_scaled_image,
39
+ is_valid_image,
40
+ to_numpy_array,
41
+ valid_images,
42
+ validate_preprocess_arguments,
43
+ )
44
+ from ...utils import TensorType, is_vision_available, logging
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ if is_vision_available():
51
+ from PIL import Image
52
+
53
+
54
+ # Copied from transformers.models.llava_next.image_processing_llava_next.make_batched_images
55
+ def make_batched_images(images) -> List[ImageInput]:
56
+ """
57
+ Accepts images in list or nested list format, and makes a list of images for preprocessing.
58
+
59
+ Args:
60
+ images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
61
+ The input image.
62
+
63
+ Returns:
64
+ list: A list of images.
65
+ """
66
+ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
67
+ return [img for img_list in images for img in img_list]
68
+
69
+ elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):
70
+ return images
71
+
72
+ elif is_valid_image(images):
73
+ return [images]
74
+
75
+ raise ValueError(f"Could not make batched video from {images}")
76
+
77
+
78
+ # Copied from transformers.models.llava_next.image_processing_llava_next.divide_to_patches
79
+ def divide_to_patches(image: np.array, patch_size: int, input_data_format) -> List[np.array]:
80
+ """
81
+ Divides an image into patches of a specified size.
82
+
83
+ Args:
84
+ image (`np.array`):
85
+ The input image.
86
+ patch_size (`int`):
87
+ The size of each patch.
88
+ input_data_format (`ChannelDimension` or `str`):
89
+ The channel dimension format of the input image.
90
+
91
+ Returns:
92
+ list: A list of np.array representing the patches.
93
+ """
94
+ patches = []
95
+ height, width = get_image_size(image, channel_dim=input_data_format)
96
+ for i in range(0, height, patch_size):
97
+ for j in range(0, width, patch_size):
98
+ if input_data_format == ChannelDimension.LAST:
99
+ patch = image[i : i + patch_size, j : j + patch_size]
100
+ else:
101
+ patch = image[:, i : i + patch_size, j : j + patch_size]
102
+ patches.append(patch)
103
+
104
+ return patches
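The scan is row-major over non-overlapping `patch_size` tiles; for example, a 384x768 channels-last image with `patch_size=384` yields two tiles (a sketch, not upstream code):

```python
import numpy as np
from transformers.image_utils import ChannelDimension
from transformers.models.llava_onevision.image_processing_llava_onevision import (
    divide_to_patches,
)

image = np.zeros((384, 768, 3), dtype=np.uint8)  # (height, width, channels)
patches = divide_to_patches(image, patch_size=384, input_data_format=ChannelDimension.LAST)
print(len(patches), patches[0].shape)  # 2 (384, 384, 3)
```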
105
+
106
+
107
+ # Copied from transformers.models.llava_next.image_processing_llava_next.expand_to_square
108
+ def expand_to_square(image: np.array, background_color, input_data_format) -> np.array:
109
+ """
110
+ Expands an image to a square by adding a background color.
111
+ """
112
+
113
+ height, width = get_image_size(image, channel_dim=input_data_format)
114
+ if width == height:
115
+ return image
116
+ elif width > height:
117
+ result = np.ones((width, width, image.shape[2]), dtype=image.dtype) * background_color
118
+ result[(width - height) // 2 : (width - height) // 2 + height, :] = image
119
+ return result
120
+ else:
121
+ result = np.ones((height, height, image.shape[2]), dtype=image.dtype) * background_color
122
+ result[:, (height - width) // 2 : (height - width) // 2 + width] = image
123
+ return result
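A small usage sketch: the shorter axis is centered on the `background_color` canvas.

```python
import numpy as np
from transformers.image_utils import ChannelDimension
from transformers.models.llava_onevision.image_processing_llava_onevision import (
    expand_to_square,
)

img = np.full((2, 4, 3), 255, dtype=np.uint8)  # 2x4 white image, channels-last
sq = expand_to_square(img, background_color=0, input_data_format=ChannelDimension.LAST)
print(sq.shape)  # (4, 4, 3): the original rows sit centered at indices 1..2
```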
124
+
125
+
126
+ # Copied from transformers.models.llava_next.image_processing_llava_next._get_patch_output_size
127
+ def _get_patch_output_size(image, target_resolution, input_data_format):
128
+ original_height, original_width = get_image_size(image, channel_dim=input_data_format)
129
+ target_height, target_width = target_resolution
130
+
131
+ scale_w = target_width / original_width
132
+ scale_h = target_height / original_height
133
+
134
+ if scale_w < scale_h:
135
+ new_width = target_width
136
+ new_height = min(math.ceil(original_height * scale_w), target_height)
137
+ else:
138
+ new_height = target_height
139
+ new_width = min(math.ceil(original_width * scale_h), target_width)
140
+
141
+ return new_height, new_width
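A worked example of the scaling rule: the smaller of the two scale factors binds, so the resized image always fits inside the target resolution.

```python
import numpy as np
from transformers.image_utils import ChannelDimension
from transformers.models.llava_onevision.image_processing_llava_onevision import (
    _get_patch_output_size,
)

image = np.zeros((300, 500, 3), dtype=np.uint8)  # H=300, W=500, channels-last
# scale_w = 768/500 = 1.536, scale_h = 384/300 = 1.28 -> height is the binding side
print(_get_patch_output_size(image, (384, 768), ChannelDimension.LAST))  # (384, 640)
```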
142
+
143
+
144
+ class LlavaOnevisionImageProcessor(BaseImageProcessor):
145
+ r"""
146
+ Constructs a LLaVa-Onevision image processor. Based on [`SiglipImageProcessor`] with incorporation of processing high resolution images by splitting them into patches.
147
+
148
+ Args:
149
+ do_resize (`bool`, *optional*, defaults to `True`):
150
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
151
+ `do_resize` in the `preprocess` method.
152
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 384, "width": 384}`):
153
+ Size of the base image after resizing, given as `{"height": h, "width": w}` or `{"shortest_edge": s}`
154
+ (treated as a square of side `s`). Can be overridden by `size` in the `preprocess`
155
+ method.
156
+ image_grid_pinpoints (`List` *optional*, defaults to all `(height, width)` multiples of 384 up to `[2304, 2304]`):
157
+ A list of possible resolutions to use for processing high resolution images. The best resolution is selected
158
+ based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
159
+ method. Not used for processing videos.
160
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
161
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
162
+ do_rescale (`bool`, *optional*, defaults to `True`):
163
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
164
+ the `preprocess` method.
165
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
166
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
167
+ method.
168
+ do_normalize (`bool`, *optional*, defaults to `True`):
169
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
170
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
171
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
172
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
173
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
174
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
175
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
177
+ do_pad (`bool`, *optional*, defaults to `True`):
178
+ Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
179
+ number of patches in the batch. Padding will be applied to the bottom and right with zeros.
180
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
181
+ Whether to convert the image to RGB.
182
+ """
183
+
184
+ model_input_names = ["pixel_values"]
185
+
186
+ def __init__(
187
+ self,
188
+ do_resize: bool = True,
189
+ size: Dict[str, int] = None,
190
+ image_grid_pinpoints: List = None,
191
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
192
+ do_rescale: bool = True,
193
+ rescale_factor: Union[int, float] = 1 / 255,
194
+ do_normalize: bool = True,
195
+ image_mean: Optional[Union[float, List[float]]] = None,
196
+ image_std: Optional[Union[float, List[float]]] = None,
197
+ do_pad: Optional[bool] = True,
198
+ do_convert_rgb: bool = True,
199
+ **kwargs,
200
+ ) -> None:
201
+ super().__init__(**kwargs)
202
+ size = size if size is not None else {"height": 384, "width": 384}
203
+ size = get_size_dict(size, default_to_square=False)
204
+ image_grid_pinpoints = (
205
+ image_grid_pinpoints
206
+ if image_grid_pinpoints is not None
207
+ else [
208
+ [384, 384],
209
+ [384, 768],
210
+ [384, 1152],
211
+ [384, 1536],
212
+ [384, 1920],
213
+ [384, 2304],
214
+ [768, 384],
215
+ [768, 768],
216
+ [768, 1152],
217
+ [768, 1536],
218
+ [768, 1920],
219
+ [768, 2304],
220
+ [1152, 384],
221
+ [1152, 768],
222
+ [1152, 1152],
223
+ [1152, 1536],
224
+ [1152, 1920],
225
+ [1152, 2304],
226
+ [1536, 384],
227
+ [1536, 768],
228
+ [1536, 1152],
229
+ [1536, 1536],
230
+ [1536, 1920],
231
+ [1536, 2304],
232
+ [1920, 384],
233
+ [1920, 768],
234
+ [1920, 1152],
235
+ [1920, 1536],
236
+ [1920, 1920],
237
+ [1920, 2304],
238
+ [2304, 384],
239
+ [2304, 768],
240
+ [2304, 1152],
241
+ [2304, 1536],
242
+ [2304, 1920],
243
+ [2304, 2304],
244
+ ]
245
+ )
246
+
247
+ self.do_resize = do_resize
248
+ self.size = size
249
+ self.image_grid_pinpoints = image_grid_pinpoints
250
+ self.resample = resample
251
+ self.do_rescale = do_rescale
252
+ self.rescale_factor = rescale_factor
253
+ self.do_normalize = do_normalize
254
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
255
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
256
+ self.do_pad = do_pad
257
+ self.do_convert_rgb = do_convert_rgb
258
+
259
+ # Copied from transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor.pad
260
+ def pad(
261
+ self,
262
+ image: np.ndarray,
263
+ padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],
264
+ mode: PaddingMode = PaddingMode.CONSTANT,
265
+ constant_values: Union[float, Iterable[float]] = 0.0,
266
+ data_format: Optional[Union[str, ChannelDimension]] = None,
267
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
268
+ ) -> np.ndarray:
269
+ """
270
+ Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)
271
+ dimension or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected
272
+ as input.
273
+
274
+ Args:
275
+ image (`np.ndarray`):
276
+ The image to pad.
277
+ padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):
278
+ Padding to apply to the edges of the height, width axes. Can be one of three formats:
279
+ - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
280
+ - `((before, after),)` yields same before and after pad for height and width.
281
+ - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
282
+ mode (`PaddingMode`):
283
+ The padding mode to use. Can be one of:
284
+ - `"constant"`: pads with a constant value.
285
+ - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
286
+ vector along each axis.
287
+ - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
288
+ - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
289
+ constant_values (`float` or `Iterable[float]`, *optional*):
290
+ The value to use for the padding if `mode` is `"constant"`.
291
+ data_format (`str` or `ChannelDimension`, *optional*):
292
+ The channel dimension format for the output image. Can be one of:
293
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
294
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
295
+ If unset, will use same as the input image.
296
+ input_data_format (`str` or `ChannelDimension`, *optional*):
297
+ The channel dimension format for the input image. Can be one of:
298
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
299
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
300
+ If unset, will use the inferred format of the input image.
301
+
302
+ Returns:
303
+ `np.ndarray`: The padded image.
304
+
305
+ """
306
+
307
+ # call the general `pad` if padding on `height/width`, otherwise it's the `num_patches` dim
308
+ if isinstance(padding, int) or len(padding) != 4:
309
+ return pad(image, padding, mode, constant_values, data_format, input_data_format)
310
+
311
+ if input_data_format is None:
312
+ input_data_format = infer_channel_dimension_format(image)
313
+ if mode == PaddingMode.CONSTANT:
314
+ image = np.pad(image, padding, mode="constant", constant_values=constant_values)
315
+ elif mode == PaddingMode.REFLECT:
316
+ image = np.pad(image, padding, mode="reflect")
317
+ elif mode == PaddingMode.REPLICATE:
318
+ image = np.pad(image, padding, mode="edge")
319
+ elif mode == PaddingMode.SYMMETRIC:
320
+ image = np.pad(image, padding, mode="symmetric")
321
+ else:
322
+ raise ValueError(f"Invalid padding mode: {mode}")
323
+ image = (
324
+ to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
325
+ )
326
+ return image
327
+
328
+ # Copied from transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor._resize_for_patching
329
+ def _resize_for_patching(
330
+ self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension
331
+ ) -> np.array:
332
+ """
333
+ Resizes an image to a target resolution while maintaining aspect ratio.
334
+
335
+ Args:
336
+ image (np.array):
337
+ The input image.
338
+ target_resolution (tuple):
339
+ The target resolution (height, width) of the image.
340
+ resample (`PILImageResampling`):
341
+ Resampling filter to use if resizing the image.
342
+ input_data_format (`ChannelDimension` or `str`):
343
+ The channel dimension format of the input image.
344
+
345
+ Returns:
346
+ np.array: The resized and padded image.
347
+ """
348
+ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)
349
+
350
+ # Resize the image
351
+ resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
352
+
353
+ return resized_image
354
+
355
+ # Copied from transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor._pad_for_patching
356
+ def _pad_for_patching(
357
+ self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension
358
+ ) -> np.array:
359
+ """
360
+ Pad an image to a target resolution while maintaining aspect ratio.
361
+ """
362
+ target_height, target_width = target_resolution
363
+ new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)
364
+
365
+ paste_x = (target_width - new_width) // 2
366
+ paste_y = (target_height - new_height) // 2
367
+
368
+ padded_image = self.pad(image, padding=((paste_y, paste_y), (paste_x, paste_x)))
369
+
370
+ return padded_image
371
+
372
+ # Copied from transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor.get_image_patches
373
+ def get_image_patches(
374
+ self,
375
+ image: np.array,
376
+ grid_pinpoints,
377
+ size: tuple,
378
+ patch_size: int,
379
+ resample: PILImageResampling,
380
+ data_format: ChannelDimension,
381
+ input_data_format: ChannelDimension,
382
+ ) -> List[np.array]:
383
+ """
384
+ Process an image with variable resolutions by dividing it into patches.
385
+
386
+ Args:
387
+ image (np.array):
388
+ The input image to be processed.
389
+ grid_pinpoints (List):
390
+ A list of possible resolutions.
391
+ size (`tuple`):
392
+ Size to resize the original image to.
393
+ patch_size (`int`):
394
+ Size of the patches to divide the image into.
395
+ resample (`PILImageResampling`):
396
+ Resampling filter to use if resizing the image.
397
+ data_format (`ChannelDimension` or `str`):
398
+ The channel dimension format for the output image.
399
+ input_data_format (`ChannelDimension` or `str`):
400
+ The channel dimension format of the input image.
401
+
402
+ Returns:
403
+ List[np.array]: A list of NumPy arrays containing the processed image patches.
404
+ """
405
+ if not isinstance(grid_pinpoints, list):
406
+ raise TypeError("grid_pinpoints must be a list of possible resolutions.")
407
+
408
+ possible_resolutions = grid_pinpoints
409
+
410
+ image_size = get_image_size(image, channel_dim=input_data_format)
411
+ best_resolution = select_best_resolution(image_size, possible_resolutions)
412
+ resized_image = self._resize_for_patching(
413
+ image, best_resolution, resample=resample, input_data_format=input_data_format
414
+ )
415
+ padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
416
+
417
+ patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
418
+
419
+ # make sure that all patches are in the input data format
420
+ patches = [
421
+ to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)
422
+ for patch in patches
423
+ ]
424
+
425
+ resized_original_image = resize(
426
+ image,
427
+ size=size,
428
+ resample=resample,
429
+ data_format=data_format,
430
+ input_data_format=input_data_format,
431
+ )
432
+
433
+ image_patches = [resized_original_image] + patches
434
+
435
+ return image_patches
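An end-to-end sketch of the patch pipeline: a 500x900 image selects `(768, 1152)` as its best grid resolution under the default pinpoints, giving a 2x3 grid of 384x384 tiles plus the base resized image.

```python
import numpy as np
from transformers import LlavaOnevisionImageProcessor
from transformers.image_utils import ChannelDimension, PILImageResampling

processor = LlavaOnevisionImageProcessor()
image = np.zeros((500, 900, 3), dtype=np.uint8)
patches = processor.get_image_patches(
    image,
    processor.image_grid_pinpoints,
    size=(384, 384),
    patch_size=384,
    resample=PILImageResampling.BICUBIC,
    data_format=ChannelDimension.LAST,
    input_data_format=ChannelDimension.LAST,
)
print(len(patches), patches[0].shape)  # 7 (384, 384, 3): base image + 2*3 grid tiles
```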
436
+
437
+ # Copied from transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor._pad_for_batching
438
+ def _pad_for_batching(
439
+ self,
440
+ pixel_values: List[np.ndarray],
441
+ data_format: Optional[Union[str, ChannelDimension]] = None,
442
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
443
+ ):
444
+ """
445
+ Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.
446
+
447
+ Args:
448
+ pixel_values (`List[np.ndarray]`):
449
+ An array of pixel values of each image of shape (`batch_size`, `num_patches`, `image_in_3D`)
450
+ data_format (`str` or `ChannelDimension`, *optional*):
451
+ The channel dimension format for the output image. Can be one of:
452
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
453
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
454
+ If unset, will use same as the input image.
455
+ input_data_format (`str` or `ChannelDimension`, *optional*):
456
+ The channel dimension format for the input image. Can be one of:
457
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
458
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
459
+ If unset, will use the inferred format of the input image.
460
+
461
+ Returns:
462
+ List[`np.ndarray`]: The padded images.
463
+ """
464
+ max_patch = max(len(x) for x in pixel_values)
465
+ pixel_values = [
466
+ self.pad(
467
+ image,
468
+ padding=((0, max_patch - image.shape[0]), (0, 0), (0, 0), (0, 0)),
469
+ data_format=data_format,
470
+ input_data_format=input_data_format,
471
+ )
472
+ for image in pixel_values
473
+ ]
474
+
475
+ return pixel_values
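Sketch of the patch-dimension padding: every item is zero-padded along axis 0 up to the batch maximum.

```python
import numpy as np
from transformers import LlavaOnevisionImageProcessor

processor = LlavaOnevisionImageProcessor()
batch = [
    np.zeros((7, 3, 384, 384)),  # image that was split into 7 patches
    np.zeros((5, 3, 384, 384)),  # image that was split into 5 patches
]
padded = processor._pad_for_batching(batch)
print([x.shape for x in padded])  # [(7, 3, 384, 384), (7, 3, 384, 384)]
```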
476
+
477
+ def _preprocess(
478
+ self,
479
+ images: ImageInput,
480
+ do_resize: bool = None,
481
+ size: Dict[str, int] = None,
482
+ resample: PILImageResampling = None,
483
+ do_rescale: bool = None,
484
+ rescale_factor: float = None,
485
+ do_normalize: bool = None,
486
+ image_mean: Optional[Union[float, List[float]]] = None,
487
+ image_std: Optional[Union[float, List[float]]] = None,
488
+ do_convert_rgb: bool = None,
489
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
490
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
491
+ ) -> Image.Image:
492
+ """
493
+ Args:
494
+ images (`ImageInput`):
495
+ Batch of images to preprocess. Expects pixel values ranging from 0 to 255. If
496
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
497
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
498
+ Whether to resize the image.
499
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
500
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
501
+ the longest edge resized to keep the input aspect ratio.
502
+ resample (`int`, *optional*, defaults to `self.resample`):
503
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
504
+ has an effect if `do_resize` is set to `True`.
505
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
506
+ Whether to rescale the image.
507
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
508
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
509
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
510
+ Whether to normalize the image.
511
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
512
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
513
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
514
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
515
+ `True`.
516
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
517
+ The channel dimension format for the output image. Can be one of:
518
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
519
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
520
+ - Unset: Use the channel dimension format of the input image.
521
+ input_data_format (`ChannelDimension` or `str`, *optional*):
522
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
523
+ from the input image. Can be one of:
524
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
525
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
526
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
527
+ """
528
+ if do_resize:
529
+ images = [
530
+ resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
531
+ for image in images
532
+ ]
533
+
534
+ if do_rescale:
535
+ images = [
536
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
537
+ for image in images
538
+ ]
539
+
540
+ if do_normalize:
541
+ images = [
542
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
543
+ for image in images
544
+ ]
545
+
546
+ images = [
547
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
548
+ ]
549
+
550
+ return images
551
+
552
+ def preprocess(
553
+ self,
554
+ images: ImageInput,
555
+ do_resize: bool = None,
556
+ size: Dict[str, int] = None,
557
+ image_grid_pinpoints: List = None,
558
+ resample: PILImageResampling = None,
559
+ do_rescale: bool = None,
560
+ rescale_factor: float = None,
561
+ do_normalize: bool = None,
562
+ image_mean: Optional[Union[float, List[float]]] = None,
563
+ image_std: Optional[Union[float, List[float]]] = None,
564
+ do_pad: Optional[bool] = None,
565
+ do_convert_rgb: bool = None,
566
+ return_tensors: Optional[Union[str, TensorType]] = None,
567
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
568
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
569
+ ):
570
+ """
571
+ Args:
572
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
573
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
574
+ tensor. Both channels-first and channels-last formats are supported.
575
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
576
+ Whether to resize the image.
577
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
578
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
579
+ the longest edge resized to keep the input aspect ratio.
580
+ image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`):
581
+ A list of possible resolutions to use for processing high resolution images. The best resolution is
582
+ selected based on the original size of the image.
583
+ resample (`int`, *optional*, defaults to `self.resample`):
584
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
585
+ has an effect if `do_resize` is set to `True`.
586
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
587
+ Whether to rescale the image.
588
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
589
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
590
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
591
+ Whether to normalize the image.
592
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
593
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
594
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
595
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
596
+ `True`.
597
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
598
+ Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
599
+ number of patches in the batch. Padding will be applied to the bottom and right with zeros.
600
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
601
+ Whether to convert the image to RGB.
602
+ return_tensors (`str` or `TensorType`, *optional*):
603
+ The type of tensors to return. Can be one of:
604
+ - Unset: Return a list of `np.ndarray`.
605
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
606
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
607
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
608
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
609
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
610
+ The channel dimension format for the output image. Can be one of:
611
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
612
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
613
+ - Unset: Use the channel dimension format of the input image.
614
+ input_data_format (`ChannelDimension` or `str`, *optional*):
615
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
616
+ from the input image. Can be one of:
617
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
618
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
619
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
620
+
621
+ """
622
+ do_resize = do_resize if do_resize is not None else self.do_resize
623
+ size = size if size is not None else self.size
624
+ size = get_size_dict(size, default_to_square=False)
625
+ image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else self.image_grid_pinpoints
626
+ resample = resample if resample is not None else self.resample
627
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
628
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
629
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
630
+ image_mean = image_mean if image_mean is not None else self.image_mean
631
+ image_std = image_std if image_std is not None else self.image_std
632
+ do_pad = do_pad if do_pad is not None else self.do_pad
633
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
634
+
635
+ images = make_batched_images(images)
636
+
637
+ if not valid_images(images):
638
+ raise ValueError(
639
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
640
+ "torch.Tensor, tf.Tensor or jax.ndarray."
641
+ )
642
+
643
+ validate_preprocess_arguments(
644
+ do_rescale=do_rescale,
645
+ rescale_factor=rescale_factor,
646
+ do_normalize=do_normalize,
647
+ image_mean=image_mean,
648
+ image_std=image_std,
649
+ do_resize=do_resize,
650
+ size=size,
651
+ resample=resample,
652
+ )
653
+
654
+ if do_convert_rgb:
655
+ images = [convert_to_rgb(image) for image in images]
656
+
657
+ # All transformations expect numpy arrays.
658
+ images = [to_numpy_array(image) for image in images]
659
+
660
+ if do_rescale and is_scaled_image(images[0]):
661
+ logger.warning_once(
662
+ "It looks like you are trying to rescale already rescaled images. If the input"
663
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
664
+ )
665
+
666
+ if input_data_format is None:
667
+ # We assume that all images have the same channel dimension format.
668
+ input_data_format = infer_channel_dimension_format(images[0])
669
+
670
+ new_images = []
671
+ image_sizes = [get_image_size(image, channel_dim=input_data_format) for image in images]
672
+ for image in images:
673
+ # convert image into a list of patches
674
+ # we intentionally use the same data format as the input data format
675
+ size_tuple = (
676
+ (size["height"], size["width"])
677
+ if "height" in size and "width" in size
678
+ else (size["shortest_edge"], size["shortest_edge"])
679
+ )
680
+ image_patches = self.get_image_patches(
681
+ image,
682
+ image_grid_pinpoints,
683
+ size=size_tuple,
684
+ patch_size=size["height"],
685
+ resample=resample,
686
+ data_format=input_data_format,
687
+ input_data_format=input_data_format,
688
+ )
689
+
690
+ # preprocess patches
691
+ pixel_values = self._preprocess(
692
+ image_patches,
693
+ do_resize=do_resize,
694
+ size=size_tuple,
695
+ resample=resample,
696
+ do_rescale=do_rescale,
697
+ rescale_factor=rescale_factor,
698
+ do_normalize=do_normalize,
699
+ image_mean=image_mean,
700
+ image_std=image_std,
701
+ data_format=data_format,
702
+ input_data_format=input_data_format,
703
+ )
704
+ pixel_values = np.array(pixel_values)
705
+ new_images.append(pixel_values)
706
+
707
+ if do_pad:
708
+ processed_images = self._pad_for_batching(new_images)
+ else:
+ processed_images = new_images
709
+
710
+ return BatchFeature(
711
+ data={"pixel_values": processed_images, "image_sizes": image_sizes}, tensor_type=return_tensors
712
+ )
713
+
714
+
715
+ __all__ = ["LlavaOnevisionImageProcessor"]
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/modeling_llava_onevision.py ADDED
@@ -0,0 +1,812 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Llava-Onevision model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+
26
+ from ...activations import ACT2FN
27
+ from ...generation import GenerationMixin
28
+ from ...image_processing_utils import select_best_resolution
29
+ from ...modeling_outputs import ModelOutput
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...utils import (
32
+ add_start_docstrings,
33
+ logging,
34
+ )
35
+ from ..auto import AutoModel, AutoModelForCausalLM
36
+ from .configuration_llava_onevision import LlavaOnevisionConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CONFIG_FOR_DOC = "LlavaOnevisionConfig"
42
+
43
+
44
+ # Copied from transformers.models.llava_next.modeling_llava_next.get_anyres_image_grid_shape
45
+ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
46
+ """
47
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
48
+
49
+ Args:
50
+ image_size (`tuple`):
51
+ The size of the input image in the format (height, width).
52
+ grid_pinpoints (`List`):
53
+ A list containing possible resolutions. Each item in the list should be a tuple or list
54
+ of the form `(height, width)`.
55
+ patch_size (`int`):
56
+ The size of each image patch.
57
+
58
+ Returns:
59
+ tuple: The shape of the image patch grid in the format (height, width).
60
+ """
61
+ if not isinstance(grid_pinpoints, list):
62
+ raise TypeError("grid_pinpoints should be a list of tuples or lists")
63
+
64
+ # ! VERY IMPORTANT: if image_size is a tensor, it must be converted to a tuple, otherwise the calculation will be wrong
65
+ if not isinstance(image_size, (list, tuple)):
66
+ if not isinstance(image_size, (torch.Tensor, np.ndarray)):
67
+ raise TypeError(
68
+ f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor"
69
+ )
70
+ image_size = image_size.tolist()
71
+
72
+ height, width = select_best_resolution(image_size, grid_pinpoints)
73
+ return height // patch_size, width // patch_size
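For example (a sketch with an abbreviated pinpoint list): a 500x900 image maps to best resolution `(768, 1152)`, i.e. a 2x3 grid of 384-pixel patches.

```python
from transformers.models.llava_onevision.modeling_llava_onevision import (
    get_anyres_image_grid_shape,
)

grid = get_anyres_image_grid_shape(
    image_size=(500, 900),                      # (height, width)
    grid_pinpoints=[[768, 1152], [1152, 768]],  # abbreviated pinpoint list
    patch_size=384,
)
print(grid)  # (2, 3)
```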
74
+
75
+
76
+ # Copied from transformers.models.llava_next.modeling_llava_next.image_size_to_num_patches
77
+ def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int):
78
+ """
79
+ Calculate the number of patches after the preprocessing for images of any resolution.
80
+
81
+ Args:
82
+ image_size (`torch.LongTensor` or `np.ndarray` or `Tuple[int, int]`):
83
+ The size of the input image in the format (height, width).
84
+ grid_pinpoints (`List`):
85
+ A list containing possible resolutions. Each item in the list should be a tuple or list
86
+ of the form `(height, width)`.
87
+ patch_size (`int`):
88
+ The size of each image patch.
89
+
90
+ Returns:
91
+ int: the number of patches
92
+ """
93
+ if not isinstance(grid_pinpoints, list):
94
+ raise TypeError("grid_pinpoints should be a list of tuples or lists")
95
+
96
+ # ! VERY IMPORTANT: if image_size is a tensor, it must be converted to a tuple, otherwise the calculation will be wrong
97
+ if not isinstance(image_size, (list, tuple)):
98
+ if not isinstance(image_size, (torch.Tensor, np.ndarray)):
99
+ raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}")
100
+ image_size = image_size.tolist()
101
+
102
+ best_resolution = select_best_resolution(image_size, grid_pinpoints)
103
+ height, width = best_resolution
104
+ num_patches = 0
105
+ # consider change to ceil(height/patch_size)*ceil(width/patch_size) + 1
106
+ for i in range(0, height, patch_size):
107
+ for j in range(0, width, patch_size):
108
+ num_patches += 1
109
+ # add the base patch
110
+ num_patches += 1
111
+ return num_patches
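As the inline comment suggests, the double loop has a closed form; a sketch of the equivalence, taking the already-selected best resolution as input:

```python
import math

def num_patches_closed_form(height: int, width: int, patch_size: int) -> int:
    # one patch per grid cell, plus one for the base (whole resized) image
    return math.ceil(height / patch_size) * math.ceil(width / patch_size) + 1

print(num_patches_closed_form(768, 1152, 384))  # 7
```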
112
+
113
+
114
+ # Copied from transformers.models.llava_next.modeling_llava_next.unpad_image
115
+ def unpad_image(tensor, original_size):
116
+ """
117
+ Unpads a PyTorch tensor of a padded and resized image.
118
+
119
+ Args:
120
+ tensor (`torch.Tensor`):
121
+ The image tensor, assumed to be of shape (num_channels, height, width).
122
+ original_size (`tuple`):
123
+ The original size of the image (height, width).
124
+
125
+ Returns:
126
+ `torch.Tensor`: The unpadded image tensor.
127
+ """
128
+ if not isinstance(original_size, (list, tuple)):
129
+ if not isinstance(original_size, (torch.Tensor, np.ndarray)):
130
+ raise TypeError(
131
+ f"image_size invalid type: {type(original_size)} not valid, should be either list, tuple, np.ndarray or tensor"
132
+ )
133
+ original_size = original_size.tolist()
134
+ original_height, original_width = original_size
135
+ current_height, current_width = tensor.shape[1:]
136
+
137
+ original_aspect_ratio = original_width / original_height
138
+ current_aspect_ratio = current_width / current_height
139
+
140
+ if original_aspect_ratio > current_aspect_ratio:
141
+ scale_factor = current_width / original_width
142
+ new_height = int(round(original_height * scale_factor, 7))
143
+ padding = (current_height - new_height) // 2
144
+ unpadded_tensor = tensor[:, padding : current_height - padding, :]
145
+ else:
146
+ scale_factor = current_height / original_height
147
+ new_width = int(round(original_width * scale_factor, 7))
148
+ padding = (current_width - new_width) // 2
149
+ unpadded_tensor = tensor[:, :, padding : current_width - padding]
150
+
151
+ return unpadded_tensor
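A worked example: a square 336x336 feature map that originally came from a 338x600 image has its vertical padding stripped.

```python
import torch
from transformers.models.llava_onevision.modeling_llava_onevision import unpad_image

features = torch.zeros(3, 336, 336)  # (channels, height, width), padded square
out = unpad_image(features, original_size=(338, 600))
print(out.shape)  # torch.Size([3, 190, 336]): rows 73..262 survive
```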
152
+
153
+
154
+ @dataclass
155
+ # Copied from transformers.models.llava_next_video.modeling_llava_next_video.LlavaNextVideoCausalLMOutputWithPast with LlavaNextVideo->LlavaOnevision
156
+ class LlavaOnevisionCausalLMOutputWithPast(ModelOutput):
157
+ """
158
+ Base class for LlavaOnevision causal language model (or autoregressive) outputs.
159
+
160
+ Args:
161
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
162
+ Language modeling loss (for next-token prediction).
163
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
164
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
165
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
166
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
167
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
168
+
169
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
170
+ `past_key_values` input) to speed up sequential decoding.
171
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
172
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
173
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
174
+
175
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
176
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
177
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
178
+ sequence_length)`.
179
+
180
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
181
+ heads.
182
+ image_hidden_states (`torch.FloatTensor`, *optional*):
183
+ A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`.
184
+ image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
185
+
186
+ video_hidden_states (`torch.FloatTensor`, *optional*):
187
+ A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
188
+ video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
189
+ """
190
+
191
+ loss: Optional[torch.FloatTensor] = None
192
+ logits: torch.FloatTensor = None
193
+ past_key_values: Optional[List[torch.FloatTensor]] = None
194
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
195
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
196
+ image_hidden_states: Optional[torch.FloatTensor] = None
197
+ video_hidden_states: Optional[torch.FloatTensor] = None
198
+
199
+
200
+ # Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->LlavaOnevision
201
+ class LlavaOnevisionMultiModalProjector(nn.Module):
202
+ def __init__(self, config: LlavaOnevisionConfig):
203
+ super().__init__()
204
+ self.linear_1 = nn.Linear(
205
+ config.vision_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
206
+ )
207
+ self.act = ACT2FN[config.projector_hidden_act]
208
+ self.linear_2 = nn.Linear(
209
+ config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
210
+ )
211
+
212
+ def forward(self, image_features):
213
+ hidden_states = self.linear_1(image_features)
214
+ hidden_states = self.act(hidden_states)
215
+ hidden_states = self.linear_2(hidden_states)
216
+ return hidden_states
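A shape-flow sketch under the default config (dimensions come from the default sub-configs, nothing hardcoded):

```python
import torch
from transformers import LlavaOnevisionConfig
from transformers.models.llava_onevision.modeling_llava_onevision import (
    LlavaOnevisionMultiModalProjector,
)

config = LlavaOnevisionConfig()  # SigLIP vision tower -> Qwen2 text backbone
proj = LlavaOnevisionMultiModalProjector(config)
feats = torch.randn(7, 729, config.vision_config.hidden_size)  # (patches, tokens, dim)
print(proj(feats).shape)  # (7, 729, config.text_config.hidden_size)
```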
217
+
218
+
219
+ LLAVA_ONEVISION_START_DOCSTRING = r"""
220
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
221
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
222
+ etc.)
223
+
224
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
225
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
226
+ and behavior.
227
+
228
+ Parameters:
229
+ config ([`LlavaOnevisionConfig`]):
230
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
231
+ load the weights associated with the model, only the configuration. Check out the
232
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
233
+ """
234
+
235
+
236
+ @add_start_docstrings(
237
+ "The bare LLaVA-Onevision Model outputting raw hidden-states without any specific head on top.",
238
+ LLAVA_ONEVISION_START_DOCSTRING,
239
+ )
240
+ class LlavaOnevisionPreTrainedModel(PreTrainedModel):
241
+ config_class = LlavaOnevisionConfig
242
+ base_model_prefix = "model"
243
+ supports_gradient_checkpointing = True
244
+ _no_split_modules = ["LlavaOnevisionVisionAttention"]
245
+ _skip_keys_device_placement = "past_key_values"
246
+ _supports_flash_attn_2 = True
247
+ _supports_cache_class = True
248
+ _supports_static_cache = False # Qwen2 doesn't but llava has no reasons to not support
249
+ _supports_quantized_cache = True
250
+ _supports_sdpa = True
251
+
252
+ # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextPreTrainedModel._init_weights
253
+ def _init_weights(self, module):
254
+ # important: this ported version of LlavaNext isn't meant for training from scratch - only
255
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
256
+ # https://github.com/haotian-liu/LLaVA/tree/main/llava_next should serve for that purpose
257
+ std = (
258
+ self.config.initializer_range
259
+ if hasattr(self.config, "initializer_range")
260
+ else self.config.text_config.initializer_range
261
+ )
262
+
263
+ if hasattr(module, "class_embedding"):
264
+ module.class_embedding.data.normal_(mean=0.0, std=std)
265
+
266
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
267
+ module.weight.data.normal_(mean=0.0, std=std)
268
+ if module.bias is not None:
269
+ module.bias.data.zero_()
270
+ elif isinstance(module, nn.Embedding):
271
+ module.weight.data.normal_(mean=0.0, std=std)
272
+ if module.padding_idx is not None:
273
+ module.weight.data[module.padding_idx].zero_()
274
+
275
+
276
+ LLAVA_ONEVISION_INPUTS_DOCSTRING = r"""
277
+ Args:
278
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
279
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
280
+ it.
281
+
282
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
283
+ [`PreTrainedTokenizer.__call__`] for details.
284
+
285
+ [What are input IDs?](../glossary#input-ids)
286
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
287
+ The tensors corresponding to the input images. Pixel values can be obtained using
288
+ [`AutoImageProcessor`]. See [`LlavaNextImageProcessor.__call__`] for details. [`LlavaProcessor`] uses
289
+ [`LlavaNextImageProcessor`] for processing images.
290
+ image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`, *optional*):
291
+ The sizes of the images in the batch, being (height, width) for each image.
292
+ pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, frames, num_channels, image_size, image_size)`):
293
+ The tensors corresponding to the input videos. Pixel values can be obtained using
294
+ [`LlavaNextVideoProcessor`]. See [`LlavaNextVideoProcessor.__call__`] for details. [`LlavaProcessor`] uses
295
+ [`LlavaNextVideoProcessor`] for processing videos.
296
+ image_sizes_videos (`torch.LongTensor` of shape `(batch_size, frames, 2)`, *optional*):
297
+ The sizes of the videos in the batch, being (height, width) for each frame in the video.
298
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
299
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
300
+
301
+ - 1 for tokens that are **not masked**,
302
+ - 0 for tokens that are **masked**.
303
+
304
+ [What are attention masks?](../glossary#attention-mask)
305
+
306
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
307
+ [`PreTrainedTokenizer.__call__`] for details.
308
+
309
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
310
+ `past_key_values`).
311
+
312
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
313
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
314
+ information on the default strategy.
315
+
316
+ - 1 indicates the head is **not masked**,
317
+ - 0 indicates the head is **masked**.
318
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
319
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
320
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
321
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
322
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
323
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
324
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
325
+
326
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
327
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
328
+
329
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
330
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
331
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
332
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
333
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
334
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
335
+ model's internal embedding lookup matrix.
336
+ vision_feature_layer (`int`, *optional*, defaults to -1):
337
+ The index of the layer to select the vision feature.
338
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`):
339
+ The feature selection strategy used to select the vision feature from the vision backbone.
340
+ Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
341
+ If `"full"`, the full vision features are used.
342
+ vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`):
343
+ Aspect ratio used when processing image features. The default value is "anyres_max_9".
344
+ use_cache (`bool`, *optional*):
345
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
346
+ `past_key_values`).
347
+ output_attentions (`bool`, *optional*):
348
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
349
+ tensors for more detail.
350
+ output_hidden_states (`bool`, *optional*):
351
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
352
+ more detail.
353
+ return_dict (`bool`, *optional*):
354
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
355
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
356
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
357
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
358
+ the complete sequence length.
359
+ """
360
+
361
+
362
+ @add_start_docstrings(
363
+ """The LLaVA-Onevision model which consists of a vision backbone and a language model.""",
364
+ LLAVA_ONEVISION_START_DOCSTRING,
365
+ )
366
+ class LlavaOnevisionForConditionalGeneration(LlavaOnevisionPreTrainedModel, GenerationMixin):
367
+ def __init__(self, config: LlavaOnevisionConfig):
368
+ super().__init__(config)
369
+ self.vision_tower = AutoModel.from_config(config.vision_config)
370
+
371
+ self.multi_modal_projector = LlavaOnevisionMultiModalProjector(config)
372
+ embed_std = 1 / math.sqrt(config.text_config.hidden_size)
373
+ self.image_newline = nn.Parameter(torch.randn(config.text_config.hidden_size, dtype=self.dtype) * embed_std)
374
+
375
+ self.vocab_size = config.text_config.vocab_size
376
+ self.language_model = AutoModelForCausalLM.from_config(config.text_config)
377
+ self.post_init()
378
+
379
+ # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_input_embeddings
380
+ def get_input_embeddings(self):
381
+ return self.language_model.get_input_embeddings()
382
+
383
+ # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_input_embeddings
384
+ def set_input_embeddings(self, value):
385
+ self.language_model.set_input_embeddings(value)
386
+
387
+ # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_output_embeddings
388
+ def get_output_embeddings(self):
389
+ return self.language_model.get_output_embeddings()
390
+
391
+ # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_output_embeddings
392
+ def set_output_embeddings(self, new_embeddings):
393
+ self.language_model.set_output_embeddings(new_embeddings)
394
+
395
+ # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.set_decoder
396
+ def set_decoder(self, decoder):
397
+ self.language_model.set_decoder(decoder)
398
+
399
+ # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.get_decoder
400
+ def get_decoder(self):
401
+ return self.language_model.get_decoder()
402
+
403
+ # Copied from transformers.models.llava_next.modeling_llava_next.LlavaNextForConditionalGeneration.tie_weights
404
+ def tie_weights(self):
405
+ return self.language_model.tie_weights()
406
+
407
+ def pack_image_features(self, image_features, image_sizes, image_newline=None, vision_aspect_ratio="anyres_max_9"):
408
+ """
409
+ Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors.
410
+
411
+ Args:
412
+ image_features (`List[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`)
413
+ List of image feature tensors, each containing all the visual features of all patches.
414
+ image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
415
+ Actual image size of each image (H, W).
416
+ image_newline (`torch.Tensor` of shape `(embed_dim)`)
417
+ New line embedding vector.
418
+ vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`):
419
+ Aspect ratio used when processing image features. The default value is "anyres_max_9".
420
+ Returns:
421
+ image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`)
422
+ feature_lens (`List[int]`)
423
+ token length of each image in image_features
424
+ """
425
+ new_image_features = []
426
+ feature_lens = []
427
+ for image_idx, image_feature in enumerate(image_features):
428
+ if image_feature.shape[0] > 1:
429
+ base_image_feature = image_feature[0]
430
+ image_feature = image_feature[1:]
431
+ height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
432
+ if height * width != base_image_feature.shape[0]:
433
+ raise ValueError("The number of patches is not consistent with the image size.")
434
+ num_patch_height, num_patch_width = get_anyres_image_grid_shape(
435
+ image_sizes[image_idx],
436
+ self.config.image_grid_pinpoints,
437
+ self.config.vision_config.image_size,
438
+ )
439
+ image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
440
+ image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
441
+ image_feature = image_feature.flatten(1, 2).flatten(2, 3)
442
+ image_feature = unpad_image(image_feature, image_sizes[image_idx])
443
+ max_num_patches = int(vision_aspect_ratio.strip("anyres_max_"))
444
+ channels, curr_height, curr_width = image_feature.shape
445
+ ratio = math.sqrt(curr_height * curr_width / (max_num_patches * height**2))
446
+ if ratio > 1.1:
447
+ image_feature = image_feature[None]
448
+ image_feature = nn.functional.interpolate(
449
+ image_feature, [int(curr_height // ratio), int(curr_width // ratio)], mode="bilinear"
450
+ )[0]
451
+ if image_newline is not None:
452
+ image_feature = torch.cat(
453
+ (
454
+ image_feature,
455
+ image_newline[:, None, None]
456
+ .expand(*image_feature.shape[:-1], 1)
457
+ .to(image_feature.device, image_feature.dtype),
458
+ ),
459
+ dim=-1,
460
+ )
461
+ image_feature = image_feature.flatten(1, 2).transpose(0, 1)
462
+ image_feature = torch.cat((base_image_feature, image_feature), dim=0)
463
+ else:
464
+ image_feature = image_feature[0]
465
+ if image_newline is not None:
466
+ image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)
467
+ new_image_features.append(image_feature)
468
+ feature_lens.append(image_feature.size(0))
469
+ image_features = torch.cat(new_image_features, dim=0)
470
+ feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features.device)
471
+ return image_features, feature_lens
472
+
473
+ def apply_pooling(self, image_features):
474
+ height = width = self.config.vision_config.image_size // self.config.vision_config.patch_size
475
+ batch_frames, seq_len, dim = image_features.shape
476
+ image_features = image_features.view(batch_frames, height, width, -1)
477
+ image_features = image_features.permute(0, 3, 1, 2).contiguous()
478
+
479
+ height, width = image_features.shape[2:]
480
+ scaled_shape = [math.ceil(height / 2), math.ceil(width / 2)]
481
+ image_features = nn.functional.interpolate(image_features, size=scaled_shape, mode="bilinear")
482
+
483
+ image_features = image_features.permute(0, 2, 3, 1)
484
+ image_features = image_features.view(batch_frames, -1, dim)
485
+ return image_features
486
+
487
+ def get_image_features(
488
+ self,
489
+ pixel_values: torch.FloatTensor,
490
+ image_sizes: torch.Tensor,
491
+ vision_feature_layer: int,
492
+ vision_feature_select_strategy: str,
493
+ ):
494
+ """
495
+ Obtains image last hidden states from the vision tower and applies multimodal projection.
496
+
497
+ Args:
498
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, channels, height, width)`)
499
+ The tensors corresponding to the input images.
500
+ image_sizes (`torch.Tensor` of shape `(num_images, 2)`)
501
+ Actual image size of each image (H, W).
502
+ vision_feature_layer (`int`):
503
+ The index of the layer to select the vision feature.
504
+ vision_feature_select_strategy (`str`):
505
+ The feature selection strategy used to select the vision feature from the vision backbone.
506
+ Can be one of `"default"` or `"full"`
507
+ Returns:
508
+ image_features (List[`torch.Tensor`]): List of image feature tensors, each containing all the visual features of all patches
509
+ and of shape `(num_patches, image_length, embed_dim)`.
510
+ """
511
+ # ! infer image_num_patches from image_sizes
512
+ image_num_patches = [
513
+ image_size_to_num_patches(
514
+ image_size=imsize,
515
+ grid_pinpoints=self.config.image_grid_pinpoints,
516
+ patch_size=self.config.vision_config.image_size,
517
+ )
518
+ for imsize in image_sizes
519
+ ]
520
+ if pixel_values.dim() == 5:
521
+ # stacked if input is (batch_size, num_patches, num_channels, height, width)
522
+ _pixel_values_list = [pix_val[:num_patch] for pix_val, num_patch in zip(pixel_values, image_num_patches)]
523
+ pixel_values = torch.cat(_pixel_values_list, dim=0)
524
+ elif pixel_values.dim() != 4:
525
+ # otherwise has to be stacked from list of (num_patches, num_channels, height, width)
526
+ raise ValueError(f"pixel_values of shape {pixel_values.shape}, expect to be of 4 or 5 dimensions")
527
+
528
+ image_features = self.vision_tower(pixel_values, output_hidden_states=True)
529
+ selected_image_feature = image_features.hidden_states[vision_feature_layer]
530
+ if vision_feature_select_strategy == "default":
531
+ selected_image_feature = selected_image_feature[:, 1:]
532
+ elif vision_feature_select_strategy == "full":
533
+ selected_image_feature = selected_image_feature
534
+ image_features = self.multi_modal_projector(selected_image_feature)
535
+ image_features = torch.split(image_features, image_num_patches, dim=0)
536
+ return image_features
537
+
538
+ def get_video_features(
539
+ self, pixel_values: torch.FloatTensor, vision_feature_layer: int, vision_feature_select_strategy: str
540
+ ):
541
+ """
542
+ Obtains video last hidden states from the vision tower, applies multimodal projection and pooling.
543
+
544
+ Args:
545
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, channels, height, width)`)
546
+ The tensors corresponding to the input video.
547
+ vision_feature_layer (`int`):
548
+ The index of the layer to select the vision feature.
549
+ vision_feature_select_strategy (`str`):
550
+ The feature selection strategy used to select the vision feature from the vision backbone.
551
+ Can be one of `"default"` or `"full"`
552
+ Returns:
553
+ video_features (List[`torch.Tensor`]): List of video feature tensors, each containing all the visual features of all patches
554
+ and of shape `(num_videos, video_length, embed_dim)`.
555
+ """
556
+ batch_size, frames, channels, height, width = pixel_values.shape
557
+ pixel_values = pixel_values.view(batch_size * frames, channels, height, width)
558
+ video_features = self.vision_tower(pixel_values, output_hidden_states=True)
559
+ selected_video_feature = video_features.hidden_states[vision_feature_layer]
560
+
561
+ if vision_feature_select_strategy == "default":
562
+ selected_video_feature = selected_video_feature[:, 1:]
563
+ elif vision_feature_select_strategy == "full":
564
+ selected_video_feature = selected_video_feature
565
+ video_features = self.multi_modal_projector(selected_video_feature)
566
+
567
+ video_features = self.apply_pooling(video_features)
568
+ video_features = video_features.reshape(batch_size, frames * video_features.shape[1], -1)
569
+
570
+ return video_features
571
+
572
+ @add_start_docstrings(LLAVA_ONEVISION_INPUTS_DOCSTRING)
573
+ def forward(
574
+ self,
575
+ input_ids: torch.LongTensor = None,
576
+ pixel_values: torch.FloatTensor = None,
577
+ image_sizes: Optional[torch.LongTensor] = None,
578
+ pixel_values_videos: torch.FloatTensor = None,
579
+ image_sizes_videos: Optional[torch.LongTensor] = None,
580
+ attention_mask: Optional[torch.Tensor] = None,
581
+ position_ids: Optional[torch.LongTensor] = None,
582
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
583
+ inputs_embeds: Optional[torch.FloatTensor] = None,
584
+ vision_feature_layer: Optional[int] = None,
585
+ vision_feature_select_strategy: Optional[str] = None,
586
+ vision_aspect_ratio: Optional[str] = None,
587
+ labels: Optional[torch.LongTensor] = None,
588
+ use_cache: Optional[bool] = None,
589
+ output_attentions: Optional[bool] = None,
590
+ output_hidden_states: Optional[bool] = None,
591
+ return_dict: Optional[bool] = None,
592
+ cache_position: Optional[torch.LongTensor] = None,
593
+ num_logits_to_keep: int = 0,
594
+ ) -> Union[Tuple, LlavaOnevisionCausalLMOutputWithPast]:
595
+ r"""
596
+ Args:
597
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
598
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
599
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
600
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
601
+
602
+ num_logits_to_keep (`int`, *optional*):
603
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
604
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
605
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
606
+
607
+
608
+ Returns:
609
+ [`~LlavaOnevisionCausalLMOutputWithPast`] (if `return_dict=True`) or a `tuple`.
610
+
611
+ Example:
612
+
613
+ ```python
614
+ >>> from PIL import Image
615
+ >>> import requests
616
+ >>> import torch
617
+ >>> from transformers import LlavaOnevisionProcessor, LlavaOnevisionForConditionalGeneration
618
+
619
+ >>> model = LlavaOnevisionForConditionalGeneration.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf", torch_dtype="float16", device_map="cuda:0")
620
+ >>> processor = LlavaOnevisionProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf")
621
+
622
+ >>> conversation = [
623
+ ... {
624
+ ... "role": "user",
625
+ ... "content": [
626
+ ... {"type": "text", "text": "What is shown in this image?"},
627
+ ... {"type": "image"},
628
+ ... ],
629
+ ... },
630
+ ... ]
631
+ >>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
632
+
633
+ >>> image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
634
+ >>> raw_image = Image.open(requests.get(image_file, stream=True).raw)
635
+ >>> inputs = processor(text=prompt, images=raw_image, return_tensors='pt').to(0, torch.float16)
636
+
637
+ >>> output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
638
+ >>> processor.batch_decode(output, skip_special_tokens=True)[0]
639
+ "user\n\nWhat is shown in this image?\nassistant\ncat"
640
+ ```"""
641
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
642
+ output_hidden_states = (
643
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
644
+ )
645
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
646
+ vision_feature_layer = (
647
+ vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
648
+ )
649
+ vision_feature_select_strategy = (
650
+ vision_feature_select_strategy
651
+ if vision_feature_select_strategy is not None
652
+ else self.config.vision_feature_select_strategy
653
+ )
654
+ vision_aspect_ratio = (
655
+ vision_aspect_ratio if vision_aspect_ratio is not None else self.config.vision_aspect_ratio
656
+ )
657
+
658
+ if (input_ids is None) ^ (inputs_embeds is not None):
659
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
660
+
661
+ if (pixel_values is not None or pixel_values_videos is not None) and inputs_embeds is not None:
662
+ raise ValueError(
663
+ "You cannot specify both `pixel_values`/`pixel_values_videos` and `inputs_embeds` at the same time, "
664
+ "and must specify either one"
665
+ )
666
+
667
+ if inputs_embeds is None:
668
+ inputs_embeds = self.get_input_embeddings()(input_ids)
669
+
670
+ # Images are processed with Anyres
671
+ if pixel_values is not None:
672
+ image_features = self.get_image_features(
673
+ pixel_values,
674
+ image_sizes,
675
+ vision_feature_layer=vision_feature_layer,
676
+ vision_feature_select_strategy=vision_feature_select_strategy,
677
+ )
678
+ image_features, feature_lens = self.pack_image_features(
679
+ image_features,
680
+ image_sizes,
681
+ image_newline=self.image_newline,
682
+ vision_aspect_ratio=vision_aspect_ratio,
683
+ )
684
+ n_image_tokens = (input_ids == self.config.image_token_index).sum().item()
685
+ n_image_features = image_features.shape[0]
686
+
687
+ if n_image_tokens != n_image_features:
688
+ raise ValueError(
689
+ f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
690
+ )
691
+ special_image_mask = (
692
+ (input_ids == self.config.image_token_index)
693
+ .unsqueeze(-1)
694
+ .expand_as(inputs_embeds)
695
+ .to(inputs_embeds.device)
696
+ )
697
+ image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
698
+ inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
699
+
700
+ # Video are simply embedded and further pooled to decrease seq len
701
+ if pixel_values_videos is not None:
702
+ video_features = self.get_video_features(
703
+ pixel_values_videos,
704
+ vision_feature_layer=vision_feature_layer,
705
+ vision_feature_select_strategy=vision_feature_select_strategy,
706
+ )
707
+ image_newline = (
708
+ self.image_newline[None, None, :].repeat(video_features.shape[0], 1, 1).to(video_features.device)
709
+ )
710
+ video_features = torch.cat((video_features, image_newline), dim=1)
711
+ video_features = video_features.flatten(0, 1)
712
+
713
+ n_video_tokens = (input_ids == self.config.video_token_index).sum().item()
714
+ n_video_features = video_features.shape[0]
715
+ if n_video_tokens != n_video_features:
716
+ raise ValueError(
717
+ f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
718
+ )
719
+ special_video_mask = (
720
+ (input_ids == self.config.video_token_index)
721
+ .unsqueeze(-1)
722
+ .expand_as(inputs_embeds)
723
+ .to(inputs_embeds.device)
724
+ )
725
+ video_features = video_features.to(inputs_embeds.device, inputs_embeds.dtype)
726
+ inputs_embeds = inputs_embeds.masked_scatter(special_video_mask, video_features)
727
+
728
+ outputs = self.language_model(
729
+ attention_mask=attention_mask,
730
+ position_ids=position_ids,
731
+ past_key_values=past_key_values,
732
+ inputs_embeds=inputs_embeds,
733
+ use_cache=use_cache,
734
+ output_attentions=output_attentions,
735
+ output_hidden_states=output_hidden_states,
736
+ return_dict=return_dict,
737
+ cache_position=cache_position,
738
+ num_logits_to_keep=num_logits_to_keep,
739
+ )
740
+
741
+ logits = outputs[0]
742
+
743
+ loss = None
744
+ if labels is not None:
745
+ # Shift so that tokens < n predict n
746
+ if attention_mask is not None:
747
+ # we use the input attention mask to shift the logits and labels, because it is 2D.
748
+ # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
749
+ shift_attention_mask = attention_mask[:, -(logits.shape[1] - 1) :].to(logits.device)
750
+ shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
751
+ shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
752
+ else:
753
+ shift_logits = logits[..., :-1, :].contiguous()
754
+ shift_labels = labels[..., 1:].contiguous()
755
+ # Flatten the tokens
756
+ loss_fct = nn.CrossEntropyLoss()
757
+ loss = loss_fct(
758
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
759
+ )
760
+
761
+ if not return_dict:
762
+ output = (logits,) + outputs[1:]
763
+ return (loss,) + output if loss is not None else output
764
+
765
+ return LlavaOnevisionCausalLMOutputWithPast(
766
+ loss=loss,
767
+ logits=logits,
768
+ past_key_values=outputs.past_key_values,
769
+ hidden_states=outputs.hidden_states,
770
+ attentions=outputs.attentions,
771
+ image_hidden_states=image_features if pixel_values is not None else None,
772
+ video_hidden_states=video_features if pixel_values_videos is not None else None,
773
+ )
774
+
775
+ def prepare_inputs_for_generation(
776
+ self,
777
+ input_ids,
778
+ past_key_values=None,
779
+ inputs_embeds=None,
780
+ pixel_values=None,
781
+ image_sizes=None,
782
+ pixel_values_videos=None,
783
+ image_sizes_videos=None,
784
+ attention_mask=None,
785
+ cache_position=None,
786
+ num_logits_to_keep=None,
787
+ **kwargs,
788
+ ):
789
+ # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
790
+
791
+ model_inputs = self.language_model.prepare_inputs_for_generation(
792
+ input_ids,
793
+ past_key_values=past_key_values,
794
+ inputs_embeds=inputs_embeds,
795
+ attention_mask=attention_mask,
796
+ cache_position=cache_position,
797
+ num_logits_to_keep=num_logits_to_keep,
798
+ **kwargs,
799
+ )
800
+
801
+ if cache_position[0] == 0:
802
+ # In the cached decoding stage, pixel values should be None because the input ids no longer contain the special image token.
803
+ # Otherwise the pixel values need to be passed to the model.
804
+ model_inputs["pixel_values"] = pixel_values
805
+ model_inputs["image_sizes"] = image_sizes
806
+ model_inputs["pixel_values_videos"] = pixel_values_videos
807
+ model_inputs["image_sizes_videos"] = image_sizes_videos
808
+
809
+ return model_inputs
810
+
811
+
812
+ __all__ = ["LlavaOnevisionForConditionalGeneration", "LlavaOnevisionPreTrainedModel"]
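
As a complement to the image example in the `forward` docstring above, a minimal sketch of the video path (`get_video_features` followed by the `<video>` token scatter) might look like the following. The checkpoint name mirrors the docstring example; the dummy 8-frame clip and its size are assumptions made purely for illustration.

```python
# Hedged sketch: video inference with LlavaOnevisionForConditionalGeneration.
import numpy as np
import torch
from transformers import LlavaOnevisionForConditionalGeneration, LlavaOnevisionProcessor

model = LlavaOnevisionForConditionalGeneration.from_pretrained(
    "llava-hf/llava-onevision-qwen2-7b-ov-hf", torch_dtype=torch.float16, device_map="cuda:0"
)
processor = LlavaOnevisionProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf")

conversation = [
    {"role": "user", "content": [{"type": "text", "text": "Describe this video."}, {"type": "video"}]},
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

# A dummy clip of 8 RGB frames; in practice the frames would come from a video decoder.
video = np.random.randint(0, 255, size=(8, 384, 384, 3), dtype=np.uint8)

inputs = processor(text=prompt, videos=video, return_tensors="pt").to(model.device, torch.float16)
output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
print(processor.batch_decode(output, skip_special_tokens=True)[0])
```

The processor replaces the single `<video>` placeholder emitted by the chat template with the full run of video tokens, so `generate` receives a sequence whose length already matches the pooled video features.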
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/processing_llava_onevision.py ADDED
@@ -0,0 +1,319 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for LLaVa-Onevision.
17
+ """
18
+
19
+ import math
20
+ import os
21
+ from typing import Iterable, List, Union
22
+
23
+ from ...feature_extraction_utils import BatchFeature
24
+ from ...image_processing_utils import select_best_resolution
25
+ from ...image_utils import ImageInput, VideoInput, get_image_size, to_numpy_array
26
+ from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
27
+ from ...tokenization_utils_base import PreTokenizedInput, TextInput
28
+ from ...utils import logging
29
+ from ..auto import AutoImageProcessor
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ class LlavaOnevisionProcessorKwargs(ProcessingKwargs, total=False):
36
+ # see processing_utils.ProcessingKwargs documentation for usage.
37
+ _defaults = {
38
+ "text_kwargs": {
39
+ "padding": False,
40
+ },
41
+ "image_kwargs": {},
42
+ "video_kwargs": {},
43
+ }
44
+
45
+
46
+ class LlavaOnevisionProcessor(ProcessorMixin):
47
+ r"""
48
+ Constructs a LLaVa-Onevision processor which wraps a LLaVa-Onevision video processor, LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor.
49
+
50
+ [`LlavaOnevisionProcessor`] offers all the functionalities of [`LlavaOnevisionVideoProcessor`], [`LlavaOnevisionImageProcessor`] and [`LlamaTokenizerFast`]. See the
51
+ [`~LlavaOnevisionVideoProcessor.__call__`], [`~LlavaOnevisionProcessor.__call__`] and [`~LlavaOnevisionProcessor.decode`] for more information.
52
+
53
+ Args:
54
+ image_processor ([`LlavaOnevisionImageProcessor`], *optional*):
55
+ The image processor is a required input.
56
+ tokenizer ([`LlamaTokenizerFast`], *optional*):
57
+ The tokenizer is a required input.
58
+ video_processor ([`LlavaOnevisionVideoProcessor`], *optional*):
59
+ The video processor is a required input.
60
+ num_image_tokens (`int`, *optional*):
61
+ Number of image tokens for one image that will be returned by the vision tower.
62
+ vision_feature_select_strategy (`str`, *optional*):
63
+ The feature selection strategy used to select the vision feature from the vision backbone.
64
+ Should be the same as in the model's config.
65
+ chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
66
+ in a chat into a tokenizable string.
67
+ image_token (`str`, *optional*, defaults to `"<image>"`):
68
+ Special token used to denote image location.
69
+ video_token (`str`, *optional*, defaults to `"<video>"`):
70
+ Special token used to denote video location.
71
+ """
72
+
73
+ attributes = ["image_processor", "tokenizer", "video_processor"]
74
+ valid_kwargs = [
75
+ "chat_template",
76
+ "num_image_tokens",
77
+ "vision_feature_select_strategy",
78
+ "image_token",
79
+ "video_token",
80
+ ]
81
+ image_processor_class = "AutoImageProcessor"
82
+ tokenizer_class = "AutoTokenizer"
83
+ video_processor_class = "LlavaOnevisionVideoProcessor"
84
+
85
+ def __init__(
86
+ self,
87
+ image_processor=None,
88
+ tokenizer=None,
89
+ video_processor=None,
90
+ num_image_tokens=None,
91
+ vision_feature_select_strategy=None,
92
+ chat_template=None,
93
+ image_token="<image>",
94
+ video_token="<video>",
95
+ **kwargs,
96
+ ):
97
+ self.num_image_tokens = num_image_tokens
98
+ self.vision_feature_select_strategy = vision_feature_select_strategy
99
+ self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
100
+ self.video_token = tokenizer.video_token if hasattr(tokenizer, "video_token") else video_token
101
+ super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
102
+
103
+ def __call__(
104
+ self,
105
+ images: ImageInput = None,
106
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
107
+ audio=None,
108
+ videos: VideoInput = None,
109
+ **kwargs: Unpack[LlavaOnevisionProcessorKwargs],
110
+ ) -> BatchFeature:
111
+ """
112
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
113
+ and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
114
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
115
+ LlavaOnevisionImageProcessor's [`~LlavaOnevisionImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
116
+ of the above two methods for more information.
117
+
118
+ Args:
119
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
120
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
121
+ tensor. Both channels-first and channels-last formats are supported.
122
+ text (`str`, `List[str]`, `List[List[str]]`):
123
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
124
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
125
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
126
+ videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
127
+ The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch tensor.
128
+
129
+ Returns:
130
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
131
+
132
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
133
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
134
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
135
+ `None`).
136
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
137
+ - **pixel_values_videos** -- Pixel values of a video input to be fed to a model. Returned when `videos` is not `None`.
138
+ - **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`.
139
+ """
140
+
141
+ output_kwargs = self._merge_kwargs(
142
+ LlavaOnevisionProcessorKwargs,
143
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
144
+ **kwargs,
145
+ )
146
+
147
+ if isinstance(text, str):
148
+ text = [text]
149
+ elif not isinstance(text, list) and not isinstance(text[0], str):
150
+ raise ValueError("Invalid input text. Please provide a string, or a list of strings")
151
+
152
+ image_inputs = video_inputs = {}
153
+
154
+ if images is not None:
155
+ image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
156
+
157
+ image_sizes = iter(image_inputs["image_sizes"])
158
+ height, width = get_image_size(
159
+ to_numpy_array(image_inputs["pixel_values"][0][0]),
160
+ channel_dim=output_kwargs["images_kwargs"].get("data_format"),
161
+ )
162
+ text = self._expand_image_tokens(text, image_sizes, height, width, self.image_token)
163
+
164
+ if videos is not None:
165
+ video_inputs = self.video_processor(videos, **output_kwargs["videos_kwargs"])
166
+
167
+ one_video = to_numpy_array(video_inputs["pixel_values_videos"][0])
168
+ height, width = get_image_size(one_video[0], channel_dim=output_kwargs["images_kwargs"].get("data_format"))
169
+ num_frames = one_video.shape[0] # frame dim is always after batch dim
170
+ patches_height_width = int(math.sqrt(self.num_image_tokens))
171
+ pooled_height_width = math.ceil(patches_height_width / 2)
172
+ num_video_tokens = (num_frames * pooled_height_width * pooled_height_width) + 1 # +1 for newline token
173
+ text = [sample.replace(self.video_token, self.video_token * num_video_tokens) for sample in text]
174
+
175
+ text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
176
+ return BatchFeature(data={**text_inputs, **image_inputs, **video_inputs})
177
+
178
+ def _expand_image_tokens(
179
+ self,
180
+ text: List[TextInput],
181
+ image_sizes: Iterable[Union[List[int], int]],
182
+ height: int,
183
+ width: int,
184
+ special_token: str,
185
+ num_frames: int = 1,
186
+ ):
187
+ prompt_strings = []
188
+ for sample in text:
189
+ while special_token in sample:
190
+ image_size_list = next(image_sizes)
191
+ original_size = image_size_list[0] if num_frames != 1 else image_size_list
192
+ if not isinstance(original_size, (list, tuple)):
193
+ # cast to list to avoid numerical precision errors when calculating unpadding
194
+ original_size = original_size.tolist()
195
+ orig_height, orig_width = original_size
196
+ num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
197
+ if self.vision_feature_select_strategy == "default":
198
+ num_image_tokens -= 1
199
+ sample = sample.replace(special_token, "<placeholder>" * num_image_tokens * num_frames, 1)
200
+ prompt_strings.append(sample)
201
+ text = [sample.replace("<placeholder>", special_token) for sample in prompt_strings]
202
+ return text
203
+
204
+ def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
205
+ image_grid_pinpoints = self.image_processor.image_grid_pinpoints
206
+
207
+ height_best_resolution, width_best_resolution = select_best_resolution(
208
+ [orig_height, orig_width], image_grid_pinpoints
209
+ )
210
+ scale_height, scale_width = height_best_resolution // height, width_best_resolution // width
211
+
212
+ patches_height = patches_width = int(math.sqrt(self.num_image_tokens))
213
+ unpadded_features, newline_features = self._get_unpadded_features(
214
+ orig_height, orig_width, patches_height, patches_width, scale_height, scale_width
215
+ )
216
+
217
+ # The base patch covers the entire image (no CLS for SigLIP)
218
+ base_features = self.num_image_tokens
219
+ num_image_tokens = unpadded_features + newline_features + base_features
220
+ return num_image_tokens
221
+
222
+ def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
223
+ """
224
+ Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA
225
+ because it divides each image into patches depending on its resolution. Therefore we need to calculate how many
226
+ patches an image is divided into and get the number of features from that.
227
+ """
228
+ current_height = patches_height * scale_height
229
+ current_width = patches_width * scale_width
230
+
231
+ original_aspect_ratio = width / height
232
+ current_aspect_ratio = current_width / current_height
233
+ if original_aspect_ratio > current_aspect_ratio:
234
+ new_height = int(height * (current_width / width))
235
+ padding = (current_height - new_height) // 2
236
+ current_height -= padding * 2
237
+ else:
238
+ new_width = int(width * (current_height / height))
239
+ padding = (current_width - new_width) // 2
240
+ current_width -= padding * 2
241
+
242
+ unpadded_features = current_height * current_width
243
+ newline_features = current_height
244
+
245
+ ratio = math.sqrt(current_height * current_width / (9 * patches_height**2))
246
+ if ratio > 1.1:
247
+ unpadded_features = int(current_height // ratio) * int(current_width // ratio)
248
+ newline_features = int(current_height // ratio)
249
+
250
+ return (unpadded_features, newline_features)
251
+
252
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
253
+ def batch_decode(self, *args, **kwargs):
254
+ """
255
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
256
+ refer to the docstring of this method for more information.
257
+ """
258
+ return self.tokenizer.batch_decode(*args, **kwargs)
259
+
260
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
261
+ def decode(self, *args, **kwargs):
262
+ """
263
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
264
+ the docstring of this method for more information.
265
+ """
266
+ return self.tokenizer.decode(*args, **kwargs)
267
+
268
+ @property
269
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
270
+ def model_input_names(self):
271
+ tokenizer_input_names = self.tokenizer.model_input_names
272
+ image_processor_input_names = self.image_processor.model_input_names
273
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
274
+
275
+ # override to save video-config in a separate config file
276
+ def save_pretrained(self, save_directory, **kwargs):
277
+ if os.path.isfile(save_directory):
278
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
279
+ os.makedirs(save_directory, exist_ok=True)
280
+ video_processor_path = os.path.join(save_directory, "video_processor")
281
+ self.video_processor.save_pretrained(video_processor_path)
282
+
283
+ video_processor_present = "video_processor" in self.attributes
284
+ if video_processor_present:
285
+ self.attributes.remove("video_processor")
286
+
287
+ outputs = super().save_pretrained(save_directory, **kwargs)
288
+
289
+ if video_processor_present:
290
+ self.attributes += ["video_processor"]
291
+ return outputs
292
+
293
+ # override to load video-config from a separate config file
294
+ @classmethod
295
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
296
+ processor = super().from_pretrained(pretrained_model_name_or_path, **kwargs)
297
+
298
+ # if return_unused_kwargs a tuple is returned where the second element is 'unused_kwargs'
299
+ if isinstance(processor, tuple):
300
+ processor = processor[0]
301
+
302
+ try:
303
+ video_processor = AutoImageProcessor.from_pretrained(
304
+ pretrained_model_name_or_path, subfolder="video_processor"
305
+ )
306
+ processor.video_processor = video_processor
307
+ except EnvironmentError:
308
+ # this means users are using prev version of saved processor where we had only one preprocessor_config.json
309
+ # for loading back that should work and load a LlavaOnevisionVideoProcessor class
310
+ logger.info(
311
+ "You are loading `LlavaOnevisionProcessor` but the indicated `path` doesn't contain a folder called "
312
+ "`video_processor`. It is strongly recommended to load and save the processor again so the video processor is saved "
313
+ "in a separate config."
314
+ )
315
+
316
+ return processor
317
+
318
+
319
+ __all__ = ["LlavaOnevisionProcessor"]
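
The video branch of `__call__` above replaces each `<video>` placeholder with `num_frames * pooled_height_width**2 + 1` copies of the token, where `pooled_height_width = ceil(sqrt(num_image_tokens) / 2)` reflects the 2x spatial pooling applied in the model and the `+ 1` accounts for the appended newline token. A small sketch of that arithmetic, assuming `num_image_tokens=729` (a 27x27 patch grid, as a 384px SigLIP backbone with 14px patches would give) and an 8-frame clip:

```python
import math

# Assumed values for illustration only.
num_image_tokens = 729   # 27 x 27 patches per frame (assumption)
num_frames = 8

patches_height_width = int(math.sqrt(num_image_tokens))     # 27
pooled_height_width = math.ceil(patches_height_width / 2)   # 14 after the 2x spatial pooling
num_video_tokens = num_frames * pooled_height_width**2 + 1  # 8 * 196 + 1 = 1569 (+1 newline token)

print(num_video_tokens)  # 1569
```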
.venv/lib/python3.11/site-packages/transformers/models/llava_onevision/video_processing_llava_onevision.py ADDED
@@ -0,0 +1,338 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Video processor class for LLaVa-Onevision."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
20
+ from ...image_transforms import (
21
+ convert_to_rgb,
22
+ resize,
23
+ to_channel_dimension_format,
24
+ )
25
+ from ...image_utils import (
26
+ OPENAI_CLIP_MEAN,
27
+ OPENAI_CLIP_STD,
28
+ ChannelDimension,
29
+ ImageInput,
30
+ PILImageResampling,
31
+ VideoInput,
32
+ infer_channel_dimension_format,
33
+ is_scaled_image,
34
+ is_valid_image,
35
+ to_numpy_array,
36
+ valid_images,
37
+ validate_preprocess_arguments,
38
+ )
39
+ from ...utils import TensorType, is_vision_available, logging
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+
45
+ if is_vision_available():
46
+ from PIL import Image
47
+
48
+
49
+ def make_batched_videos(videos) -> List[VideoInput]:
50
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
51
+ return videos
52
+
53
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
54
+ if isinstance(videos[0], Image.Image) or len(videos[0].shape) == 3:
55
+ return [videos]
56
+ elif len(videos[0].shape) == 4:
57
+ return [list(video) for video in videos]
58
+
59
+ elif is_valid_image(videos) and len(videos.shape) == 4:
60
+ return [list(videos)]
61
+
62
+ raise ValueError(f"Could not make batched video from {videos}")
63
+
64
+
65
+ class LlavaOnevisionVideoProcessor(BaseImageProcessor):
66
+ r"""
67
+ Constructs a LLaVa-Onevision-Video video processor. Based on [`SiglipImageProcessor`] with incorporation of processing each video frame.
68
+
69
+ Args:
70
+ do_resize (`bool`, *optional*, defaults to `True`):
71
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
72
+ `do_resize` in the `preprocess` method.
73
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
74
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
75
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
76
+ method.
77
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
78
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
79
+ do_rescale (`bool`, *optional*, defaults to `True`):
80
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
81
+ the `preprocess` method.
82
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
83
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
84
+ method.
85
+ do_normalize (`bool`, *optional*, defaults to `True`):
86
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
87
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
88
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
89
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
90
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
91
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
92
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
93
+ Can be overridden by the `image_std` parameter in the `preprocess` method.
94
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
95
+ Whether to convert the image to RGB.
96
+ """
97
+
98
+ model_input_names = ["pixel_values_videos"]
99
+
100
+ def __init__(
101
+ self,
102
+ do_resize: bool = True,
103
+ size: Dict[str, int] = None,
104
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
105
+ do_rescale: bool = True,
106
+ rescale_factor: Union[int, float] = 1 / 255,
107
+ do_normalize: bool = True,
108
+ image_mean: Optional[Union[float, List[float]]] = None,
109
+ image_std: Optional[Union[float, List[float]]] = None,
110
+ do_convert_rgb: bool = True,
111
+ **kwargs,
112
+ ) -> None:
113
+ super().__init__(**kwargs)
114
+ size = size if size is not None else {"height": 384, "width": 384}
115
+ size = get_size_dict(size, default_to_square=False)
116
+
117
+ self.do_resize = do_resize
118
+ self.size = size
119
+ self.resample = resample
120
+ self.do_rescale = do_rescale
121
+ self.rescale_factor = rescale_factor
122
+ self.do_normalize = do_normalize
123
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
124
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
125
+ self.do_convert_rgb = do_convert_rgb
126
+
127
+ def _preprocess(
128
+ self,
129
+ images: ImageInput,
130
+ do_resize: bool = None,
131
+ size: Dict[str, int] = None,
132
+ resample: PILImageResampling = None,
133
+ do_rescale: bool = None,
134
+ rescale_factor: float = None,
135
+ do_normalize: bool = None,
136
+ image_mean: Optional[Union[float, List[float]]] = None,
137
+ image_std: Optional[Union[float, List[float]]] = None,
138
+ do_convert_rgb: bool = None,
139
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
140
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
141
+ ) -> Image.Image:
142
+ """
143
+ Args:
144
+ images (`ImageInput`):
145
+ Batch of frames (one video) to preprocess. Expects a batch of frames with pixel values ranging from 0 to 255. If
146
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
147
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
148
+ Whether to resize the image.
149
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
150
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
151
+ the longest edge resized to keep the input aspect ratio.
152
+ resample (`int`, *optional*, defaults to `self.resample`):
153
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
154
+ has an effect if `do_resize` is set to `True`.
155
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
156
+ Whether to rescale the image.
157
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
158
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
159
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
160
+ Whether to normalize the image.
161
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
162
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
163
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
164
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
165
+ `True`.
166
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
167
+ The channel dimension format for the output image. Can be one of:
168
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
169
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
170
+ - Unset: Use the channel dimension format of the input image.
171
+ input_data_format (`ChannelDimension` or `str`, *optional*):
172
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
173
+ from the input image. Can be one of:
174
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
175
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
176
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
177
+ """
178
+ if do_convert_rgb:
179
+ images = [convert_to_rgb(image) for image in images]
180
+
181
+ # All transformations expect numpy arrays.
182
+ images = [to_numpy_array(image) for image in images]
183
+
184
+ if do_rescale and is_scaled_image(images[0]):
185
+ logger.warning_once(
186
+ "It looks like you are trying to rescale already rescaled videos. If the input"
187
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
188
+ )
189
+
190
+ if input_data_format is None:
191
+ # We assume that all images have the same channel dimension format.
192
+ input_data_format = infer_channel_dimension_format(images[0])
193
+
194
+ if do_resize:
195
+ images = [
196
+ resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
197
+ for image in images
198
+ ]
199
+
200
+ if do_rescale:
201
+ images = [
202
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
203
+ for image in images
204
+ ]
205
+
206
+ if do_normalize:
207
+ images = [
208
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
209
+ for image in images
210
+ ]
211
+
212
+ images = [
213
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
214
+ ]
215
+
216
+ return images
217
+
218
+ def preprocess(
219
+ self,
220
+ videos: VideoInput,
221
+ do_resize: bool = None,
222
+ size: Dict[str, int] = None,
223
+ resample: PILImageResampling = None,
224
+ do_rescale: bool = None,
225
+ rescale_factor: float = None,
226
+ do_normalize: bool = None,
227
+ image_mean: Optional[Union[float, List[float]]] = None,
228
+ image_std: Optional[Union[float, List[float]]] = None,
229
+ do_convert_rgb: bool = None,
230
+ return_tensors: Optional[Union[str, TensorType]] = None,
231
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
232
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
233
+ ):
234
+ """
235
+ Args:
236
+ videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
237
+ The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
238
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
239
+ Whether to resize the image.
240
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
241
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
242
+ the longest edge resized to keep the input aspect ratio.
243
+ resample (`int`, *optional*, defaults to `self.resample`):
244
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
245
+ has an effect if `do_resize` is set to `True`.
246
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
247
+ Whether to rescale the image.
248
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
249
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
250
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
251
+ Whether to normalize the image.
252
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
253
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
254
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
255
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
256
+ `True`.
257
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
258
+ Whether to convert the image to RGB.
259
+ return_tensors (`str` or `TensorType`, *optional*):
260
+ The type of tensors to return. Can be one of:
261
+ - Unset: Return a list of `np.ndarray`.
262
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
263
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
264
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
265
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
266
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
267
+ The channel dimension format for the output image. Can be one of:
268
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
269
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
270
+ - Unset: Use the channel dimension format of the input image.
271
+ input_data_format (`ChannelDimension` or `str`, *optional*):
272
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
273
+ from the input image. Can be one of:
274
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
275
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
276
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
277
+
278
+ """
279
+ do_resize = do_resize if do_resize is not None else self.do_resize
280
+ size = size if size is not None else self.size
281
+ resample = resample if resample is not None else self.resample
282
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
283
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
284
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
285
+ image_mean = image_mean if image_mean is not None else self.image_mean
286
+ image_std = image_std if image_std is not None else self.image_std
287
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
288
+
289
+ videos = make_batched_videos(videos)
290
+
291
+ if not valid_images(videos[0]):
292
+ raise ValueError(
293
+ "Invalid video type. Must be a list consisting of PIL.Image.Image, numpy.ndarray, "
294
+ "torch.Tensor, tf.Tensor or jax.ndarray."
295
+ )
296
+
297
+ validate_preprocess_arguments(
298
+ do_rescale=do_rescale,
299
+ rescale_factor=rescale_factor,
300
+ do_normalize=do_normalize,
301
+ image_mean=image_mean,
302
+ image_std=image_std,
303
+ do_resize=do_resize,
304
+ size=size,
305
+ resample=resample,
306
+ )
307
+
308
+ size_tuple = (
309
+ (size["height"], size["width"])
310
+ if "height" in size and "width" in size
311
+ else (size["shortest_edge"], size["shortest_edge"])
312
+ )
313
+
314
+ pixel_values = [
315
+ self._preprocess(
316
+ video,
317
+ do_resize=do_resize,
318
+ size=size_tuple,
319
+ resample=resample,
320
+ do_rescale=do_rescale,
321
+ rescale_factor=rescale_factor,
322
+ do_normalize=do_normalize,
323
+ image_mean=image_mean,
324
+ image_std=image_std,
325
+ do_convert_rgb=do_convert_rgb,
326
+ data_format=data_format,
327
+ input_data_format=input_data_format,
328
+ )
329
+ for video in videos
330
+ ]
331
+
332
+ return BatchFeature(
333
+ data={"pixel_values_videos": pixel_values},
334
+ tensor_type=return_tensors,
335
+ )
336
+
337
+
338
+ __all__ = ["LlavaOnevisionVideoProcessor"]
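
A self-contained sketch of running this video processor on a dummy clip follows; the 384x384 target size comes from the `__init__` defaults above, while the frame count and input resolution here are arbitrary choices for illustration.

```python
# Hedged sketch: preprocessing a dummy 4-frame clip with LlavaOnevisionVideoProcessor.
import numpy as np
from transformers import LlavaOnevisionVideoProcessor

video_processor = LlavaOnevisionVideoProcessor()  # defaults: resize to 384x384, rescale by 1/255, CLIP mean/std

# One video as a 4D array (frames, height, width, channels); make_batched_videos wraps it into a batch of one.
video = np.random.randint(0, 255, size=(4, 256, 320, 3), dtype=np.uint8)

batch = video_processor.preprocess(video, return_tensors="pt")
print(batch["pixel_values_videos"].shape)  # expected: torch.Size([1, 4, 3, 384, 384])
```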
.venv/lib/python3.11/site-packages/transformers/models/myt5/__init__.py ADDED
@@ -0,0 +1,26 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .tokenization_myt5 import *
22
+ else:
23
+ import sys
24
+
25
+ _file = globals()["__file__"]
26
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
.venv/lib/python3.11/site-packages/transformers/models/myt5/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (724 Bytes). View file
 
.venv/lib/python3.11/site-packages/transformers/models/myt5/__pycache__/tokenization_myt5.cpython-311.pyc ADDED
Binary file (20.5 kB). View file
 
.venv/lib/python3.11/site-packages/transformers/models/myt5/tokenization_myt5.py ADDED
@@ -0,0 +1,380 @@
1
+ # coding=utf-8
2
+ # Copyright 2024
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for model MyT5."""
16
+
17
+ import json
18
+ import os
19
+ import warnings
20
+ from collections import defaultdict
21
+ from typing import Dict, List, Optional, Tuple, Union
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "byte_maps.json"}
31
+
32
+
33
+ class ByteRewriter:
34
+ """
35
+ Byte rewriter class for MyT5 tokenizer.
36
+ This class is used to rewrite bytes using a hash tree. The hash tree is constructed from a set of rewriting rules.
37
+
38
+ Args:
39
+ rewriting_rules (`str` or `Dict[str, str]`):
40
+ A path to a json file containing the rewriting rules or a dictionary containing the rewriting rules.
41
+
42
+ """
43
+
44
+ LEAF = "[LEAF]"
45
+
46
+ def __init__(self, rewriting_rules: Union[str, Dict[str, str]]):
47
+ if isinstance(rewriting_rules, str):
48
+ with open(rewriting_rules, "r") as f:
49
+ rewriting_rules = json.load(f)
50
+ elif not isinstance(rewriting_rules, dict):
51
+ raise ValueError(
52
+ f"rewriting_rules should be either a path to json file or a dict, got {type(rewriting_rules)}"
53
+ )
54
+
55
+ self.hash_tree = self.construct_hash_tree(rewriting_rules)
56
+ reverse_rewriting_rules = {v: k for k, v in rewriting_rules.items()}
57
+ self.reverse_hash_tree = self.construct_hash_tree(reverse_rewriting_rules)
58
+
59
+ def add_leaf(self, hash_tree: Dict[str, Union[dict, List[str]]], byte_in_sequence: str, byte_out_sequence: str):
60
+ """
61
+ Add a leaf with the output byte sequence to the hash tree.
62
+ """
63
+ byte_in_list = byte_in_sequence.split(" ")
64
+ byte_out_list = byte_out_sequence.split(" ")
65
+
66
+ tree_pointer = hash_tree
67
+ for b in byte_in_list:
68
+ if b not in tree_pointer:
69
+ tree_pointer[b] = {}
70
+ tree_pointer = tree_pointer[b]
71
+
72
+ tree_pointer[self.LEAF] = byte_out_list
73
+
74
+ def construct_hash_tree(self, rewriting_rules: Dict[str, str]) -> Dict[str, Union[dict, List[str]]]:
75
+ """
76
+ Construct a hash tree for rewritten byte sequences.
77
+ """
78
+ hash_tree = defaultdict(dict)
79
+ for b in (f"{x:02x}" for x in range(256)):
80
+ hash_tree[b][self.LEAF] = [b]
81
+
82
+ for in_sequence, out_sequence in rewriting_rules.items():
83
+ self.add_leaf(hash_tree, in_sequence, out_sequence)
84
+
85
+ return hash_tree
86
+
87
+ def search_hash_tree(self, byte_sequence: List[str]) -> Union[None, List[str]]:
88
+ """
89
+ Search the hash tree and return the rewritten byte sequence if found.
90
+ """
91
+ tree_pointer = self.hash_tree
92
+ for b in byte_sequence:
93
+ if b in tree_pointer:
94
+ tree_pointer = tree_pointer[b]
95
+ else:
96
+ return None
97
+
98
+ return tree_pointer[self.LEAF]
99
+
100
+ def rewrite_bytes(self, in_bytes: List[str], reverse=False) -> List[str]:
101
+ """
102
+ Rewrite a sequence of bytes using the hash tree.
103
+
104
+ Args:
105
+ in_bytes (`List[str]`): A list of bytes to be rewritten.
106
+ reverse (`bool`): If True, decoding is performed with the reverse hash tree.
107
+ Returns:
108
+ `List[str]`: The rewritten byte sequence.
109
+ """
110
+ out_bytes = []
111
+ b_start = 0
112
+ b_end = 0
113
+
114
+ while b_start < len(in_bytes):
115
+ tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree
116
+ for j in range(b_start, len(in_bytes)):
117
+ b = in_bytes[j]
118
+ if b in tree_pointer:
119
+ tree_pointer = tree_pointer[b]
120
+ elif j == b_start:
121
+ cur_leaf = [b]
122
+ b_end = j
123
+ break
124
+ else:
125
+ break
126
+ if self.LEAF in tree_pointer:
127
+ cur_leaf = tree_pointer[self.LEAF]
128
+ b_end = j
129
+ out_bytes.extend(cur_leaf)
130
+ b_start = b_end + 1
131
+
132
+ return out_bytes
133
+
134
+
135
+ class MyT5Tokenizer(PreTrainedTokenizer):
136
+ """
137
+ Construct a MyT5 tokenizer.
138
+
139
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
140
+ this superclass for more information regarding those methods.
141
+
142
+ Args:
143
+ vocab_file (`str`): The file containing the byte rewriting rules.
144
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
145
+ The end of sequence token.
146
+
147
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
148
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
149
+ token instead.
150
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
151
+ The token used for padding, for example when batching sequences of different lengths.
152
+ extra_ids (`int`, *optional*, defaults to 125):
153
+ The number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
154
+ accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
155
+ indexed from the end of the vocabulary up to the beginning ("<extra_id_0>" is the last token in the vocabulary,
156
+ as in ByT5 preprocessing; see
157
+ [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
158
+ additional_special_tokens (`List[str]`, *optional*):
159
+ Additional special tokens used by the tokenizer.
160
+ """
161
+
162
+ model_input_names = ["input_ids", "attention_mask"]
163
+ vocab_files_names = VOCAB_FILES_NAMES
164
+
165
+ def __init__(
166
+ self,
167
+ vocab_file,
168
+ eos_token="</s>",
169
+ unk_token="<unk>",
170
+ pad_token="<pad>",
171
+ extra_ids=125,
172
+ additional_special_tokens=None,
173
+ **kwargs,
174
+ ) -> None:
175
+ # Add extra_ids to the special token list
176
+ if extra_ids > 0 and additional_special_tokens is None:
177
+ additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
178
+ elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0:
179
+ # Check that we have the right number of extra_id special tokens
180
+ extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
181
+ if extra_tokens != extra_ids:
182
+ raise ValueError(
183
+ f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
184
+ " provided to MyT5Tokenizer. In this case the additional_special_tokens must include the"
185
+ " extra_ids tokens"
186
+ )
187
+
188
+ pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token
189
+ eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token
190
+ unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token
191
+ # unk token needs to be in the vocab with correct index
192
+ self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token}
193
+ self.offset = len(self._added_tokens_decoder)
194
+ self._utf_vocab_size = 2**8 # utf is 8 bits
195
+
196
+ # Load byte maps
197
+ self.byte_maps = json.load(open(vocab_file, "r"))
198
+
199
+ self.decompose_rewriter = ByteRewriter(self.byte_maps["decompose_map"])
200
+ self.merge_rewriter = ByteRewriter(self.byte_maps["merge_map"])
201
+
202
+ super().__init__(
203
+ eos_token=eos_token,
204
+ unk_token=unk_token,
205
+ pad_token=pad_token,
206
+ extra_ids=0,
207
+ additional_special_tokens=additional_special_tokens,
208
+ **kwargs,
209
+ )
210
+
211
+ @property
212
+ def vocab_size(self):
213
+ return self._utf_vocab_size
214
+
215
+ # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.get_vocab
216
+ def get_vocab(self):
217
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
218
+ vocab.update(self.added_tokens_encoder)
219
+ return vocab
220
+
221
+ # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.get_special_tokens_mask
222
+ def get_special_tokens_mask(
223
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
224
+ ) -> List[int]:
225
+ """
226
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
227
+ special tokens using the tokenizer `prepare_for_model` method.
228
+
229
+ Args:
230
+ token_ids_0 (`List[int]`):
231
+ List of IDs.
232
+ token_ids_1 (`List[int]`, *optional*):
233
+ Optional second list of IDs for sequence pairs.
234
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
235
+ Whether or not the token list is already formatted with special tokens for the model.
236
+
237
+ Returns:
238
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
239
+ """
240
+ if already_has_special_tokens:
241
+ return super().get_special_tokens_mask(
242
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
243
+ )
244
+
245
+ # normal case: some special tokens
246
+ if token_ids_1 is None:
247
+ return ([0] * len(token_ids_0)) + [1]
248
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
249
+
250
+ def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
251
+ """Do not add eos again if user already added it."""
252
+ if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
253
+ warnings.warn(
254
+ f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
255
+ " eos tokens being added."
256
+ )
257
+ return token_ids
258
+ else:
259
+ return token_ids + [self.eos_token_id]
260
+
261
+ def create_token_type_ids_from_sequences(
262
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
263
+ ) -> List[int]:
264
+ """
265
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not
266
+ make use of token type ids, therefore a list of zeros is returned.
267
+
268
+ Args:
269
+ token_ids_0 (`List[int]`):
270
+ List of IDs.
271
+ token_ids_1 (`List[int]`, *optional*):
272
+ Optional second list of IDs for sequence pairs.
273
+
274
+ Returns:
275
+ `List[int]`: List of zeros.
276
+ """
277
+ eos = [self.eos_token_id]
278
+
279
+ if token_ids_1 is None:
280
+ return len(token_ids_0 + eos) * [0]
281
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
282
+
283
+ # Copied from transformers.models.byt5.tokenization_byt5.ByT5Tokenizer.build_inputs_with_special_tokens
284
+ def build_inputs_with_special_tokens(
285
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
286
+ ) -> List[int]:
287
+ """
288
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
289
+ adding special tokens. A sequence has the following format:
290
+
291
+ - single sequence: `X </s>`
292
+ - pair of sequences: `A </s> B </s>`
293
+
294
+ Args:
295
+ token_ids_0 (`List[int]`):
296
+ List of IDs to which the special tokens will be added.
297
+ token_ids_1 (`List[int]`, *optional*):
298
+ Optional second list of IDs for sequence pairs.
299
+
300
+ Returns:
301
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
302
+ """
303
+ token_ids_0 = self._add_eos_if_not_present(token_ids_0)
304
+ if token_ids_1 is None:
305
+ return token_ids_0
306
+ else:
307
+ token_ids_1 = self._add_eos_if_not_present(token_ids_1)
308
+ return token_ids_0 + token_ids_1
309
+
310
+ def _tokenize(self, text: str, **kwargs) -> List[str]:
311
+ """Take as input a string and return a list of strings (tokens) for words/sub-words.
312
+ Represents tokens in two character hex format"""
313
+
314
+ tokens = [f"{i:02x}" for i in text.encode("utf-8")]
315
+ tokens = self.morphological_encode(tokens)
316
+ return tokens
317
+
318
+ def _convert_token_to_id(self, token):
319
+ """Converts a token (str) in an id using the vocab."""
320
+
321
+ if len(token) != 2:
322
+ token_id = None
323
+ else:
324
+ token_id = int(token, 16) + self.offset
325
+
326
+ return token_id
327
+
328
+ def _convert_id_to_token(self, index):
329
+ """Converts an index (integer) in a token (str) using the vocab."""
330
+ token = f"{index - self.offset:02x}"
331
+ return token
332
+
333
+ def morphological_encode(self, indices: List[str]) -> List[str]:
334
+ # Decompose and merge morphological sequences
335
+ indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=False)
336
+ indices = self.merge_rewriter.rewrite_bytes(indices, reverse=False)
337
+ return indices
338
+
339
+ def morphological_decode(self, indices: List[str]) -> List[str]:
340
+ # Demerge and compose morphological sequences
341
+ indices = self.merge_rewriter.rewrite_bytes(indices, reverse=True)
342
+ indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=True)
343
+ return indices
344
+
345
+ def convert_tokens_to_string(self, tokens):
346
+ """Converts a sequence of tokens (string) in a single string."""
347
+ bstring = b""
348
+
349
+ out_tokens = []
350
+ for token in tokens:
351
+ if token in self.added_tokens_decoder:
352
+ out_tokens.append(self.added_tokens_decoder[token])
353
+ elif token in self.added_tokens_encoder:
354
+ out_tokens.append(token)
355
+ else:
356
+ out_tokens.append(token)
357
+
358
+ out_tokens = self.morphological_decode(out_tokens)
359
+ _added_tokens = set(self.added_tokens_decoder.values()) | set(self.added_tokens_encoder)
360
+ for token in out_tokens:
361
+ if token in _added_tokens:
362
+ bstring += bytes(token, "utf-8")
363
+ else:
364
+ bstring += bytes.fromhex(token)
365
+ string = bstring.decode("utf-8", errors="ignore")
366
+ return string
367
+
368
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
369
+ if os.path.isdir(save_directory):
370
+ vocab_file = os.path.join(
371
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
372
+ )
373
+ else:
374
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
375
+ with open(vocab_file, "w", encoding="utf-8") as writer:
376
+ writer.write(json.dumps(self.byte_maps, indent=2, ensure_ascii=False))
377
+ return (vocab_file,)
378
+
379
+
380
+ __all__ = ["MyT5Tokenizer"]
.venv/lib/python3.11/site-packages/transformers/models/rembert/__init__.py ADDED
@@ -0,0 +1,30 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_rembert import *
22
+ from .modeling_rembert import *
23
+ from .modeling_tf_rembert import *
24
+ from .tokenization_rembert import *
25
+ from .tokenization_rembert_fast import *
26
+ else:
27
+ import sys
28
+
29
+ _file = globals()["__file__"]
30
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
.venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (899 Bytes).
 
.venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/configuration_rembert.cpython-311.pyc ADDED
Binary file (7.55 kB).
 
.venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/modeling_rembert.cpython-311.pyc ADDED
Binary file (75.1 kB).
 
.venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/modeling_tf_rembert.cpython-311.pyc ADDED
Binary file (85.7 kB).
 
.venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/tokenization_rembert.cpython-311.pyc ADDED
Binary file (13.8 kB).
 
.venv/lib/python3.11/site-packages/transformers/models/rembert/__pycache__/tokenization_rembert_fast.cpython-311.pyc ADDED
Binary file (11.4 kB).
 
.venv/lib/python3.11/site-packages/transformers/models/rembert/configuration_rembert.py ADDED
@@ -0,0 +1,162 @@
1
+ # coding=utf-8
2
+ # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """RemBERT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class RemBertConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`RemBertModel`]. It is used to instantiate an
31
+ RemBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
32
+ with the defaults will yield a similar configuration to that of the RemBERT
33
+ [google/rembert](https://huggingface.co/google/rembert) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 250300):
41
+ Vocabulary size of the RemBERT model. Defines the number of different tokens that can be represented by the
42
+ `input_ids` passed when calling [`RemBertModel`] or [`TFRemBertModel`].
45
+ hidden_size (`int`, *optional*, defaults to 1152):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 32):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 18):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ input_embedding_size (`int`, *optional*, defaults to 256):
52
+ Dimensionality of the input embeddings.
53
+ output_embedding_size (`int`, *optional*, defaults to 1664):
54
+ Dimensionality of the output embeddings.
55
+ intermediate_size (`int`, *optional*, defaults to 4608):
56
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
58
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
59
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0):
63
+ The dropout ratio for the attention probabilities.
64
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
65
+ The dropout ratio for the classifier layer when fine-tuning.
66
+ max_position_embeddings (`int`, *optional*, defaults to 512):
67
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
68
+ just in case (e.g., 512 or 1024 or 2048).
69
+ type_vocab_size (`int`, *optional*, defaults to 2):
70
+ The vocabulary size of the `token_type_ids` passed when calling [`RemBertModel`] or [`TFRemBertModel`].
71
+ initializer_range (`float`, *optional*, defaults to 0.02):
72
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
73
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
74
+ The epsilon used by the layer normalization layers.
75
+ is_decoder (`bool`, *optional*, defaults to `False`):
76
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
77
+ use_cache (`bool`, *optional*, defaults to `True`):
78
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
79
+ relevant if `config.is_decoder=True`.
80
+
81
+ Example:
82
+
83
+ ```python
84
+ >>> from transformers import RemBertModel, RemBertConfig
85
+
86
+ >>> # Initializing a RemBERT rembert style configuration
87
+ >>> configuration = RemBertConfig()
88
+
89
+ >>> # Initializing a model from the rembert style configuration
90
+ >>> model = RemBertModel(configuration)
91
+
92
+ >>> # Accessing the model configuration
93
+ >>> configuration = model.config
94
+ ```"""
95
+
96
+ model_type = "rembert"
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_size=250300,
101
+ hidden_size=1152,
102
+ num_hidden_layers=32,
103
+ num_attention_heads=18,
104
+ input_embedding_size=256,
105
+ output_embedding_size=1664,
106
+ intermediate_size=4608,
107
+ hidden_act="gelu",
108
+ hidden_dropout_prob=0.0,
109
+ attention_probs_dropout_prob=0.0,
110
+ classifier_dropout_prob=0.1,
111
+ max_position_embeddings=512,
112
+ type_vocab_size=2,
113
+ initializer_range=0.02,
114
+ layer_norm_eps=1e-12,
115
+ use_cache=True,
116
+ pad_token_id=0,
117
+ bos_token_id=312,
118
+ eos_token_id=313,
119
+ **kwargs,
120
+ ):
121
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
122
+
123
+ self.vocab_size = vocab_size
124
+ self.input_embedding_size = input_embedding_size
125
+ self.output_embedding_size = output_embedding_size
126
+ self.max_position_embeddings = max_position_embeddings
127
+ self.hidden_size = hidden_size
128
+ self.num_hidden_layers = num_hidden_layers
129
+ self.num_attention_heads = num_attention_heads
130
+ self.intermediate_size = intermediate_size
131
+ self.hidden_act = hidden_act
132
+ self.hidden_dropout_prob = hidden_dropout_prob
133
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
134
+ self.classifier_dropout_prob = classifier_dropout_prob
135
+ self.initializer_range = initializer_range
136
+ self.type_vocab_size = type_vocab_size
137
+ self.layer_norm_eps = layer_norm_eps
138
+ self.use_cache = use_cache
139
+ self.tie_word_embeddings = False
140
+
141
+
142
+ class RemBertOnnxConfig(OnnxConfig):
143
+ @property
144
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
145
+ if self.task == "multiple-choice":
146
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
147
+ else:
148
+ dynamic_axis = {0: "batch", 1: "sequence"}
149
+ return OrderedDict(
150
+ [
151
+ ("input_ids", dynamic_axis),
152
+ ("attention_mask", dynamic_axis),
153
+ ("token_type_ids", dynamic_axis),
154
+ ]
155
+ )
156
+
157
+ @property
158
+ def atol_for_validation(self) -> float:
159
+ return 1e-4
160
+
161
+
162
+ __all__ = ["RemBertConfig", "RemBertOnnxConfig"]
.venv/lib/python3.11/site-packages/transformers/models/rembert/modeling_rembert.py ADDED
@@ -0,0 +1,1517 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch RemBERT model."""
16
+
17
+ import math
18
+ import os
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...generation import GenerationMixin
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ BaseModelOutputWithPoolingAndCrossAttentions,
31
+ CausalLMOutputWithCrossAttentions,
32
+ MaskedLMOutput,
33
+ MultipleChoiceModelOutput,
34
+ QuestionAnsweringModelOutput,
35
+ SequenceClassifierOutput,
36
+ TokenClassifierOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel
39
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
40
+ from ...utils import (
41
+ add_code_sample_docstrings,
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_rembert import RemBertConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CONFIG_FOR_DOC = "RemBertConfig"
53
+ _CHECKPOINT_FOR_DOC = "google/rembert"
54
+
55
+
56
+ def load_tf_weights_in_rembert(model, config, tf_checkpoint_path):
57
+ """Load tf checkpoints in a pytorch model."""
58
+ try:
59
+ import re
60
+
61
+ import numpy as np
62
+ import tensorflow as tf
63
+ except ImportError:
64
+ logger.error(
65
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
66
+ "https://www.tensorflow.org/install/ for installation instructions."
67
+ )
68
+ raise
69
+ tf_path = os.path.abspath(tf_checkpoint_path)
70
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
71
+ # Load weights from TF model
72
+ init_vars = tf.train.list_variables(tf_path)
73
+ names = []
74
+ arrays = []
75
+ for name, shape in init_vars:
76
+ # Checkpoint is 12Gb, save memory by not loading useless variables
77
+ # Output embedding and cls are reset at classification time
78
+ if any(deny in name for deny in ("adam_v", "adam_m", "output_embedding", "cls")):
79
+ # logger.info("Skipping loading of %s", name)
80
+ continue
81
+ logger.info(f"Loading TF weight {name} with shape {shape}")
82
+ array = tf.train.load_variable(tf_path, name)
83
+ names.append(name)
84
+ arrays.append(array)
85
+
86
+ for name, array in zip(names, arrays):
87
+ # Replace prefix with right one
88
+ name = name.replace("bert/", "rembert/")
89
+ # The pooler is a linear layer
90
+ # name = name.replace("pooler/dense", "pooler")
91
+
92
+ name = name.split("/")
93
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
94
+ # which are not required for using pretrained model
95
+ if any(
96
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
97
+ for n in name
98
+ ):
99
+ logger.info(f"Skipping {'/'.join(name)}")
100
+ continue
101
+ pointer = model
102
+ for m_name in name:
103
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
104
+ scope_names = re.split(r"_(\d+)", m_name)
105
+ else:
106
+ scope_names = [m_name]
107
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
108
+ pointer = getattr(pointer, "weight")
109
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
110
+ pointer = getattr(pointer, "bias")
111
+ elif scope_names[0] == "output_weights":
112
+ pointer = getattr(pointer, "weight")
113
+ elif scope_names[0] == "squad":
114
+ pointer = getattr(pointer, "classifier")
115
+ else:
116
+ try:
117
+ pointer = getattr(pointer, scope_names[0])
118
+ except AttributeError:
119
+ logger.info("Skipping {}".format("/".join(name)))
120
+ continue
121
+ if len(scope_names) >= 2:
122
+ num = int(scope_names[1])
123
+ pointer = pointer[num]
124
+ if m_name[-11:] == "_embeddings":
125
+ pointer = getattr(pointer, "weight")
126
+ elif m_name == "kernel":
127
+ array = np.transpose(array)
128
+ try:
129
+ if pointer.shape != array.shape:
130
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
131
+ except AssertionError as e:
132
+ e.args += (pointer.shape, array.shape)
133
+ raise
134
+ logger.info(f"Initialize PyTorch weight {name}")
135
+ pointer.data = torch.from_numpy(array)
136
+ return model
137
+
138
+
139
+ class RemBertEmbeddings(nn.Module):
140
+ """Construct the embeddings from word, position and token_type embeddings."""
141
+
142
+ def __init__(self, config):
143
+ super().__init__()
144
+ self.word_embeddings = nn.Embedding(
145
+ config.vocab_size, config.input_embedding_size, padding_idx=config.pad_token_id
146
+ )
147
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.input_embedding_size)
148
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.input_embedding_size)
149
+
150
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
151
+ # any TensorFlow checkpoint file
152
+ self.LayerNorm = nn.LayerNorm(config.input_embedding_size, eps=config.layer_norm_eps)
153
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
154
+
155
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
156
+ self.register_buffer(
157
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
158
+ )
159
+
160
+ def forward(
161
+ self,
162
+ input_ids: Optional[torch.LongTensor] = None,
163
+ token_type_ids: Optional[torch.LongTensor] = None,
164
+ position_ids: Optional[torch.LongTensor] = None,
165
+ inputs_embeds: Optional[torch.FloatTensor] = None,
166
+ past_key_values_length: int = 0,
167
+ ) -> torch.Tensor:
168
+ if input_ids is not None:
169
+ input_shape = input_ids.size()
170
+ else:
171
+ input_shape = inputs_embeds.size()[:-1]
172
+
173
+ seq_length = input_shape[1]
174
+
175
+ if position_ids is None:
176
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
177
+
178
+ if token_type_ids is None:
179
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
180
+
181
+ if inputs_embeds is None:
182
+ inputs_embeds = self.word_embeddings(input_ids)
183
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
184
+
185
+ embeddings = inputs_embeds + token_type_embeddings
186
+ position_embeddings = self.position_embeddings(position_ids)
187
+ embeddings += position_embeddings
188
+ embeddings = self.LayerNorm(embeddings)
189
+ embeddings = self.dropout(embeddings)
190
+ return embeddings
191
+
192
+
193
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RemBert
194
+ class RemBertPooler(nn.Module):
195
+ def __init__(self, config):
196
+ super().__init__()
197
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
198
+ self.activation = nn.Tanh()
199
+
200
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
201
+ # We "pool" the model by simply taking the hidden state corresponding
202
+ # to the first token.
203
+ first_token_tensor = hidden_states[:, 0]
204
+ pooled_output = self.dense(first_token_tensor)
205
+ pooled_output = self.activation(pooled_output)
206
+ return pooled_output
207
+
208
+
209
+ class RemBertSelfAttention(nn.Module):
210
+ def __init__(self, config):
211
+ super().__init__()
212
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
213
+ raise ValueError(
214
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
215
+ f"heads ({config.num_attention_heads})"
216
+ )
217
+
218
+ self.num_attention_heads = config.num_attention_heads
219
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
220
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
221
+
222
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
223
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
224
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
225
+
226
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
227
+
228
+ self.is_decoder = config.is_decoder
229
+
230
+ def transpose_for_scores(self, x):
231
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
232
+ x = x.view(*new_x_shape)
233
+ return x.permute(0, 2, 1, 3)
234
+
235
+ def forward(
236
+ self,
237
+ hidden_states: torch.Tensor,
238
+ attention_mask: Optional[torch.FloatTensor] = None,
239
+ head_mask: Optional[torch.FloatTensor] = None,
240
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
241
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
242
+ past_key_value: Tuple[Tuple[torch.FloatTensor]] = None,
243
+ output_attentions: bool = False,
244
+ ) -> Tuple:
245
+ mixed_query_layer = self.query(hidden_states)
246
+
247
+ # If this is instantiated as a cross-attention module, the keys
248
+ # and values come from an encoder; the attention mask needs to be
249
+ # such that the encoder's padding tokens are not attended to.
250
+ is_cross_attention = encoder_hidden_states is not None
251
+
252
+ if is_cross_attention and past_key_value is not None:
253
+ # reuse k,v, cross_attentions
254
+ key_layer = past_key_value[0]
255
+ value_layer = past_key_value[1]
256
+ attention_mask = encoder_attention_mask
257
+ elif is_cross_attention:
258
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
259
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
260
+ attention_mask = encoder_attention_mask
261
+ elif past_key_value is not None:
262
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
263
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
264
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
265
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
266
+ else:
267
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
268
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
269
+
270
+ query_layer = self.transpose_for_scores(mixed_query_layer)
271
+
272
+ if self.is_decoder:
273
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
274
+ # Further calls to cross_attention layer can then reuse all cross-attention
275
+ # key/value_states (first "if" case)
276
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
277
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
278
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
279
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
280
+ past_key_value = (key_layer, value_layer)
281
+
282
+ # Take the dot product between "query" and "key" to get the raw attention scores.
283
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
284
+
285
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
286
+ if attention_mask is not None:
287
+ # Apply the attention mask is (precomputed for all layers in RemBertModel forward() function)
288
+ attention_scores = attention_scores + attention_mask
289
+
290
+ # Normalize the attention scores to probabilities.
291
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
292
+
293
+ # This is actually dropping out entire tokens to attend to, which might
294
+ # seem a bit unusual, but is taken from the original Transformer paper.
295
+ attention_probs = self.dropout(attention_probs)
296
+
297
+ # Mask heads if we want to
298
+ if head_mask is not None:
299
+ attention_probs = attention_probs * head_mask
300
+
301
+ context_layer = torch.matmul(attention_probs, value_layer)
302
+
303
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
304
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
305
+ context_layer = context_layer.view(*new_context_layer_shape)
306
+
307
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
308
+
309
+ if self.is_decoder:
310
+ outputs = outputs + (past_key_value,)
311
+ return outputs
312
+
313
+
314
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->RemBert
315
+ class RemBertSelfOutput(nn.Module):
316
+ def __init__(self, config):
317
+ super().__init__()
318
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
319
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
320
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
321
+
322
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
323
+ hidden_states = self.dense(hidden_states)
324
+ hidden_states = self.dropout(hidden_states)
325
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
326
+ return hidden_states
327
+
328
+
329
+ class RemBertAttention(nn.Module):
330
+ def __init__(self, config):
331
+ super().__init__()
332
+ self.self = RemBertSelfAttention(config)
333
+ self.output = RemBertSelfOutput(config)
334
+ self.pruned_heads = set()
335
+
336
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
337
+ def prune_heads(self, heads):
338
+ if len(heads) == 0:
339
+ return
340
+ heads, index = find_pruneable_heads_and_indices(
341
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
342
+ )
343
+
344
+ # Prune linear layers
345
+ self.self.query = prune_linear_layer(self.self.query, index)
346
+ self.self.key = prune_linear_layer(self.self.key, index)
347
+ self.self.value = prune_linear_layer(self.self.value, index)
348
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
349
+
350
+ # Update hyper params and store pruned heads
351
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
352
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
353
+ self.pruned_heads = self.pruned_heads.union(heads)
354
+
355
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.forward
356
+ def forward(
357
+ self,
358
+ hidden_states: torch.Tensor,
359
+ attention_mask: Optional[torch.FloatTensor] = None,
360
+ head_mask: Optional[torch.FloatTensor] = None,
361
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
362
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
363
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
364
+ output_attentions: Optional[bool] = False,
365
+ ) -> Tuple[torch.Tensor]:
366
+ self_outputs = self.self(
367
+ hidden_states,
368
+ attention_mask,
369
+ head_mask,
370
+ encoder_hidden_states,
371
+ encoder_attention_mask,
372
+ past_key_value,
373
+ output_attentions,
374
+ )
375
+ attention_output = self.output(self_outputs[0], hidden_states)
376
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
377
+ return outputs
378
+
379
+
380
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RemBert
381
+ class RemBertIntermediate(nn.Module):
382
+ def __init__(self, config):
383
+ super().__init__()
384
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
385
+ if isinstance(config.hidden_act, str):
386
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
387
+ else:
388
+ self.intermediate_act_fn = config.hidden_act
389
+
390
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
391
+ hidden_states = self.dense(hidden_states)
392
+ hidden_states = self.intermediate_act_fn(hidden_states)
393
+ return hidden_states
394
+
395
+
396
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->RemBert
397
+ class RemBertOutput(nn.Module):
398
+ def __init__(self, config):
399
+ super().__init__()
400
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
401
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
402
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
403
+
404
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
405
+ hidden_states = self.dense(hidden_states)
406
+ hidden_states = self.dropout(hidden_states)
407
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
408
+ return hidden_states
409
+
410
+
411
+ class RemBertLayer(nn.Module):
412
+ def __init__(self, config):
413
+ super().__init__()
414
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
415
+ self.seq_len_dim = 1
416
+ self.attention = RemBertAttention(config)
417
+ self.is_decoder = config.is_decoder
418
+ self.add_cross_attention = config.add_cross_attention
419
+ if self.add_cross_attention:
420
+ if not self.is_decoder:
421
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
422
+ self.crossattention = RemBertAttention(config)
423
+ self.intermediate = RemBertIntermediate(config)
424
+ self.output = RemBertOutput(config)
425
+
426
+ # Copied from transformers.models.bert.modeling_bert.BertLayer.forward
427
+ def forward(
428
+ self,
429
+ hidden_states: torch.Tensor,
430
+ attention_mask: Optional[torch.FloatTensor] = None,
431
+ head_mask: Optional[torch.FloatTensor] = None,
432
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
433
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
434
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
435
+ output_attentions: Optional[bool] = False,
436
+ ) -> Tuple[torch.Tensor]:
437
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
438
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
439
+ self_attention_outputs = self.attention(
440
+ hidden_states,
441
+ attention_mask,
442
+ head_mask,
443
+ output_attentions=output_attentions,
444
+ past_key_value=self_attn_past_key_value,
445
+ )
446
+ attention_output = self_attention_outputs[0]
447
+
448
+ # if decoder, the last output is tuple of self-attn cache
449
+ if self.is_decoder:
450
+ outputs = self_attention_outputs[1:-1]
451
+ present_key_value = self_attention_outputs[-1]
452
+ else:
453
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
454
+
455
+ cross_attn_present_key_value = None
456
+ if self.is_decoder and encoder_hidden_states is not None:
457
+ if not hasattr(self, "crossattention"):
458
+ raise ValueError(
459
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
460
+ " by setting `config.add_cross_attention=True`"
461
+ )
462
+
463
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
464
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
465
+ cross_attention_outputs = self.crossattention(
466
+ attention_output,
467
+ attention_mask,
468
+ head_mask,
469
+ encoder_hidden_states,
470
+ encoder_attention_mask,
471
+ cross_attn_past_key_value,
472
+ output_attentions,
473
+ )
474
+ attention_output = cross_attention_outputs[0]
475
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
476
+
477
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
478
+ cross_attn_present_key_value = cross_attention_outputs[-1]
479
+ present_key_value = present_key_value + cross_attn_present_key_value
480
+
481
+ layer_output = apply_chunking_to_forward(
482
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
483
+ )
484
+ outputs = (layer_output,) + outputs
485
+
486
+ # if decoder, return the attn key/values as the last output
487
+ if self.is_decoder:
488
+ outputs = outputs + (present_key_value,)
489
+
490
+ return outputs
491
+
492
+ # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk
493
+ def feed_forward_chunk(self, attention_output):
494
+ intermediate_output = self.intermediate(attention_output)
495
+ layer_output = self.output(intermediate_output, attention_output)
496
+ return layer_output
497
+
498
+
499
+ class RemBertEncoder(nn.Module):
500
+ def __init__(self, config):
501
+ super().__init__()
502
+ self.config = config
503
+
504
+ self.embedding_hidden_mapping_in = nn.Linear(config.input_embedding_size, config.hidden_size)
505
+ self.layer = nn.ModuleList([RemBertLayer(config) for _ in range(config.num_hidden_layers)])
506
+ self.gradient_checkpointing = False
507
+
508
+ def forward(
509
+ self,
510
+ hidden_states: torch.Tensor,
511
+ attention_mask: Optional[torch.FloatTensor] = None,
512
+ head_mask: Optional[torch.FloatTensor] = None,
513
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
514
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
515
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
516
+ use_cache: Optional[bool] = None,
517
+ output_attentions: bool = False,
518
+ output_hidden_states: bool = False,
519
+ return_dict: bool = True,
520
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
521
+ if self.gradient_checkpointing and self.training:
522
+ if use_cache:
523
+ logger.warning_once(
524
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
525
+ )
526
+ use_cache = False
527
+ hidden_states = self.embedding_hidden_mapping_in(hidden_states)
528
+ all_hidden_states = () if output_hidden_states else None
529
+ all_self_attentions = () if output_attentions else None
530
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
531
+
532
+ next_decoder_cache = () if use_cache else None
533
+ for i, layer_module in enumerate(self.layer):
534
+ if output_hidden_states:
535
+ all_hidden_states = all_hidden_states + (hidden_states,)
536
+
537
+ layer_head_mask = head_mask[i] if head_mask is not None else None
538
+ past_key_value = past_key_values[i] if past_key_values is not None else None
539
+
540
+ if self.gradient_checkpointing and self.training:
541
+ layer_outputs = self._gradient_checkpointing_func(
542
+ layer_module.__call__,
543
+ hidden_states,
544
+ attention_mask,
545
+ layer_head_mask,
546
+ encoder_hidden_states,
547
+ encoder_attention_mask,
548
+ past_key_value,
549
+ output_attentions,
550
+ )
551
+ else:
552
+ layer_outputs = layer_module(
553
+ hidden_states,
554
+ attention_mask,
555
+ layer_head_mask,
556
+ encoder_hidden_states,
557
+ encoder_attention_mask,
558
+ past_key_value,
559
+ output_attentions,
560
+ )
561
+
562
+ hidden_states = layer_outputs[0]
563
+ if use_cache:
564
+ next_decoder_cache += (layer_outputs[-1],)
565
+ if output_attentions:
566
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
567
+ if self.config.add_cross_attention:
568
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
569
+
570
+ if output_hidden_states:
571
+ all_hidden_states = all_hidden_states + (hidden_states,)
572
+
573
+ if not return_dict:
574
+ return tuple(
575
+ v
576
+ for v in [
577
+ hidden_states,
578
+ next_decoder_cache,
579
+ all_hidden_states,
580
+ all_self_attentions,
581
+ all_cross_attentions,
582
+ ]
583
+ if v is not None
584
+ )
585
+ return BaseModelOutputWithPastAndCrossAttentions(
586
+ last_hidden_state=hidden_states,
587
+ past_key_values=next_decoder_cache,
588
+ hidden_states=all_hidden_states,
589
+ attentions=all_self_attentions,
590
+ cross_attentions=all_cross_attentions,
591
+ )
592
+
593
+
594
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RemBert
595
+ class RemBertPredictionHeadTransform(nn.Module):
596
+ def __init__(self, config):
597
+ super().__init__()
598
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
599
+ if isinstance(config.hidden_act, str):
600
+ self.transform_act_fn = ACT2FN[config.hidden_act]
601
+ else:
602
+ self.transform_act_fn = config.hidden_act
603
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
604
+
605
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
606
+ hidden_states = self.dense(hidden_states)
607
+ hidden_states = self.transform_act_fn(hidden_states)
608
+ hidden_states = self.LayerNorm(hidden_states)
609
+ return hidden_states
610
+
611
+
612
+ class RemBertLMPredictionHead(nn.Module):
613
+ def __init__(self, config):
614
+ super().__init__()
615
+ self.dense = nn.Linear(config.hidden_size, config.output_embedding_size)
616
+ self.decoder = nn.Linear(config.output_embedding_size, config.vocab_size)
617
+ self.activation = ACT2FN[config.hidden_act]
618
+ self.LayerNorm = nn.LayerNorm(config.output_embedding_size, eps=config.layer_norm_eps)
619
+
620
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
621
+ hidden_states = self.dense(hidden_states)
622
+ hidden_states = self.activation(hidden_states)
623
+ hidden_states = self.LayerNorm(hidden_states)
624
+ hidden_states = self.decoder(hidden_states)
625
+ return hidden_states
626
+
627
+
628
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RemBert
629
+ class RemBertOnlyMLMHead(nn.Module):
630
+ def __init__(self, config):
631
+ super().__init__()
632
+ self.predictions = RemBertLMPredictionHead(config)
633
+
634
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
635
+ prediction_scores = self.predictions(sequence_output)
636
+ return prediction_scores
637
+
638
+
639
+ class RemBertPreTrainedModel(PreTrainedModel):
640
+ """
641
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
642
+ models.
643
+ """
644
+
645
+ config_class = RemBertConfig
646
+ load_tf_weights = load_tf_weights_in_rembert
647
+ base_model_prefix = "rembert"
648
+ supports_gradient_checkpointing = True
649
+
650
+ def _init_weights(self, module):
651
+ """Initialize the weights"""
652
+ if isinstance(module, nn.Linear):
653
+ # Slightly different from the TF version which uses truncated_normal for initialization
654
+ # cf https://github.com/pytorch/pytorch/pull/5617
655
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
656
+ if module.bias is not None:
657
+ module.bias.data.zero_()
658
+ elif isinstance(module, nn.Embedding):
659
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
660
+ if module.padding_idx is not None:
661
+ module.weight.data[module.padding_idx].zero_()
662
+ elif isinstance(module, nn.LayerNorm):
663
+ module.bias.data.zero_()
664
+ module.weight.data.fill_(1.0)
665
+
666
+
667
+ REMBERT_START_DOCSTRING = r"""
668
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
669
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
670
+ behavior.
671
+
672
+ Parameters:
673
+ config ([`RemBertConfig`]): Model configuration class with all the parameters of the model.
674
+ Initializing with a config file does not load the weights associated with the model, only the
675
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
676
+ """
677
+
678
+ REMBERT_INPUTS_DOCSTRING = r"""
679
+ Args:
680
+ input_ids (`torch.LongTensor` of shape `({0})`):
681
+ Indices of input sequence tokens in the vocabulary.
682
+
683
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
684
+ [`PreTrainedTokenizer.__call__`] for details.
685
+
686
+ [What are input IDs?](../glossary#input-ids)
687
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
688
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
689
+
690
+ - 1 for tokens that are **not masked**,
691
+ - 0 for tokens that are **masked**.
692
+
693
+ [What are attention masks?](../glossary#attention-mask)
694
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
695
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
696
+ 1]`:
697
+
698
+ - 0 corresponds to a *sentence A* token,
699
+ - 1 corresponds to a *sentence B* token.
700
+
701
+ [What are token type IDs?](../glossary#token-type-ids)
702
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
703
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
704
+ config.max_position_embeddings - 1]`.
705
+
706
+ [What are position IDs?](../glossary#position-ids)
707
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
708
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
709
+
710
+ - 1 indicates the head is **not masked**,
711
+ - 0 indicates the head is **masked**.
712
+
713
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
714
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
715
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
716
+ model's internal embedding lookup matrix.
717
+ output_attentions (`bool`, *optional*):
718
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
719
+ tensors for more detail.
720
+ output_hidden_states (`bool`, *optional*):
721
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
722
+ more detail.
723
+ return_dict (`bool`, *optional*):
724
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
725
+ """
726
+
727
+
728
+ @add_start_docstrings(
729
+ "The bare RemBERT Model transformer outputting raw hidden-states without any specific head on top.",
730
+ REMBERT_START_DOCSTRING,
731
+ )
732
+ class RemBertModel(RemBertPreTrainedModel):
733
+ """
734
+
735
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
736
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
737
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
738
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
739
+
740
+ To behave as a decoder, the model needs to be initialized with the `is_decoder` argument of the configuration set
741
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
742
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
743
+ """
744
+
745
+ def __init__(self, config, add_pooling_layer=True):
746
+ super().__init__(config)
747
+ self.config = config
748
+
749
+ self.embeddings = RemBertEmbeddings(config)
750
+ self.encoder = RemBertEncoder(config)
751
+
752
+ self.pooler = RemBertPooler(config) if add_pooling_layer else None
753
+
754
+ # Initialize weights and apply final processing
755
+ self.post_init()
756
+
757
+ def get_input_embeddings(self):
758
+ return self.embeddings.word_embeddings
759
+
760
+ def set_input_embeddings(self, value):
761
+ self.embeddings.word_embeddings = value
762
+
763
+ def _prune_heads(self, heads_to_prune):
764
+ """
765
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
766
+ class PreTrainedModel
767
+ """
768
+ for layer, heads in heads_to_prune.items():
769
+ self.encoder.layer[layer].attention.prune_heads(heads)
770
+
771
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
772
+ @add_code_sample_docstrings(
773
+ checkpoint="google/rembert",
774
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
775
+ config_class=_CONFIG_FOR_DOC,
776
+ )
777
+ def forward(
778
+ self,
779
+ input_ids: torch.LongTensor = None,
780
+ attention_mask: Optional[torch.LongTensor] = None,
781
+ token_type_ids: Optional[torch.LongTensor] = None,
782
+ position_ids: Optional[torch.LongTensor] = None,
783
+ head_mask: Optional[torch.FloatTensor] = None,
784
+ inputs_embeds: Optional[torch.FloatTensor] = None,
785
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
786
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
787
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
788
+ use_cache: Optional[bool] = None,
789
+ output_attentions: Optional[bool] = None,
790
+ output_hidden_states: Optional[bool] = None,
791
+ return_dict: Optional[bool] = None,
792
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
793
+ r"""
794
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
795
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
796
+ the model is configured as a decoder.
797
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
798
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
799
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
800
+
801
+ - 1 for tokens that are **not masked**,
802
+ - 0 for tokens that are **masked**.
803
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
804
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
805
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
806
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
807
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
808
+ use_cache (`bool`, *optional*):
809
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
810
+ `past_key_values`).
811
+ """
812
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
813
+ output_hidden_states = (
814
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
815
+ )
816
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
817
+
818
+ if self.config.is_decoder:
819
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
820
+ else:
821
+ use_cache = False
822
+
823
+ if input_ids is not None and inputs_embeds is not None:
824
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
825
+ elif input_ids is not None:
826
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
827
+ input_shape = input_ids.size()
828
+ elif inputs_embeds is not None:
829
+ input_shape = inputs_embeds.size()[:-1]
830
+ else:
831
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
832
+
833
+ batch_size, seq_length = input_shape
834
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
835
+
836
+ # past_key_values_length
837
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
838
+
839
+ if attention_mask is None:
840
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
841
+ if token_type_ids is None:
842
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
843
+
844
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
845
+ # ourselves in which case we just need to make it broadcastable to all heads.
846
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
847
+
848
+ # If a 2D or 3D attention mask is provided for the cross-attention
849
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
850
+ if self.config.is_decoder and encoder_hidden_states is not None:
851
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
852
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
853
+ if encoder_attention_mask is None:
854
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
855
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
856
+ else:
857
+ encoder_extended_attention_mask = None
858
+
859
+ # Prepare head mask if needed
860
+ # 1.0 in head_mask indicate we keep the head
861
+ # attention_probs has shape bsz x n_heads x N x N
862
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
863
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
864
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
865
+
866
+ embedding_output = self.embeddings(
867
+ input_ids=input_ids,
868
+ position_ids=position_ids,
869
+ token_type_ids=token_type_ids,
870
+ inputs_embeds=inputs_embeds,
871
+ past_key_values_length=past_key_values_length,
872
+ )
873
+ encoder_outputs = self.encoder(
874
+ embedding_output,
875
+ attention_mask=extended_attention_mask,
876
+ head_mask=head_mask,
877
+ encoder_hidden_states=encoder_hidden_states,
878
+ encoder_attention_mask=encoder_extended_attention_mask,
879
+ past_key_values=past_key_values,
880
+ use_cache=use_cache,
881
+ output_attentions=output_attentions,
882
+ output_hidden_states=output_hidden_states,
883
+ return_dict=return_dict,
884
+ )
885
+ sequence_output = encoder_outputs[0]
886
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
887
+
888
+ if not return_dict:
889
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
890
+
891
+ return BaseModelOutputWithPoolingAndCrossAttentions(
892
+ last_hidden_state=sequence_output,
893
+ pooler_output=pooled_output,
894
+ past_key_values=encoder_outputs.past_key_values,
895
+ hidden_states=encoder_outputs.hidden_states,
896
+ attentions=encoder_outputs.attentions,
897
+ cross_attentions=encoder_outputs.cross_attentions,
898
+ )
899
+
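+ # Illustrative usage sketch (commented out; not part of the upstream file): running RemBertModel as a plain
+ # encoder with the "google/rembert" checkpoint referenced in the docstrings above. To use the model as a
+ # decoder instead, pass a config with `is_decoder=True` (and `add_cross_attention=True` for seq2seq use),
+ # as described in the class docstring.
+ #
+ #     from transformers import AutoTokenizer, RemBertModel
+ #     import torch
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("google/rembert")
+ #     model = RemBertModel.from_pretrained("google/rembert")
+ #     inputs = tokenizer("RemBERT decouples input and output embeddings.", return_tensors="pt")
+ #     with torch.no_grad():
+ #         outputs = model(**inputs)
+ #     print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)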
900
+
901
+ @add_start_docstrings("""RemBERT Model with a `language modeling` head on top.""", REMBERT_START_DOCSTRING)
902
+ class RemBertForMaskedLM(RemBertPreTrainedModel):
903
+ _tied_weights_keys = ["cls.predictions.decoder.weight"]
904
+
905
+ def __init__(self, config):
906
+ super().__init__(config)
907
+
908
+ if config.is_decoder:
909
+ logger.warning(
910
+ "If you want to use `RemBertForMaskedLM` make sure `config.is_decoder=False` for "
911
+ "bi-directional self-attention."
912
+ )
913
+
914
+ self.rembert = RemBertModel(config, add_pooling_layer=False)
915
+ self.cls = RemBertOnlyMLMHead(config)
916
+
917
+ # Initialize weights and apply final processing
918
+ self.post_init()
919
+
920
+ def get_output_embeddings(self):
921
+ return self.cls.predictions.decoder
922
+
923
+ def set_output_embeddings(self, new_embeddings):
924
+ self.cls.predictions.decoder = new_embeddings
925
+
926
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
927
+ @add_code_sample_docstrings(
928
+ checkpoint="google/rembert",
929
+ output_type=MaskedLMOutput,
930
+ config_class=_CONFIG_FOR_DOC,
931
+ )
932
+ def forward(
933
+ self,
934
+ input_ids: torch.LongTensor = None,
935
+ attention_mask: Optional[torch.LongTensor] = None,
936
+ token_type_ids: Optional[torch.LongTensor] = None,
937
+ position_ids: Optional[torch.LongTensor] = None,
938
+ head_mask: Optional[torch.FloatTensor] = None,
939
+ inputs_embeds: Optional[torch.FloatTensor] = None,
940
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
941
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
942
+ labels: Optional[torch.LongTensor] = None,
943
+ output_attentions: Optional[bool] = None,
944
+ output_hidden_states: Optional[bool] = None,
945
+ return_dict: Optional[bool] = None,
946
+ ) -> Union[Tuple, MaskedLMOutput]:
947
+ r"""
948
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
949
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
950
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
951
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
952
+ """
953
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
954
+
955
+ outputs = self.rembert(
956
+ input_ids,
957
+ attention_mask=attention_mask,
958
+ token_type_ids=token_type_ids,
959
+ position_ids=position_ids,
960
+ head_mask=head_mask,
961
+ inputs_embeds=inputs_embeds,
962
+ encoder_hidden_states=encoder_hidden_states,
963
+ encoder_attention_mask=encoder_attention_mask,
964
+ output_attentions=output_attentions,
965
+ output_hidden_states=output_hidden_states,
966
+ return_dict=return_dict,
967
+ )
968
+
969
+ sequence_output = outputs[0]
970
+ prediction_scores = self.cls(sequence_output)
971
+
972
+ masked_lm_loss = None
973
+ if labels is not None:
974
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
975
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
976
+
977
+ if not return_dict:
978
+ output = (prediction_scores,) + outputs[2:]
979
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
980
+
981
+ return MaskedLMOutput(
982
+ loss=masked_lm_loss,
983
+ logits=prediction_scores,
984
+ hidden_states=outputs.hidden_states,
985
+ attentions=outputs.attentions,
986
+ )
987
+
988
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
989
+ input_shape = input_ids.shape
990
+ effective_batch_size = input_shape[0]
991
+
992
+ # add a dummy token
993
+ assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
994
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
995
+ dummy_token = torch.full(
996
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
997
+ )
998
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
999
+
1000
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1001
+
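+ # Illustrative usage sketch (commented out; not part of the upstream file): masked-token prediction with
+ # RemBertForMaskedLM. The mask token string is taken from the tokenizer rather than hard-coded.
+ #
+ #     from transformers import AutoTokenizer, RemBertForMaskedLM
+ #     import torch
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("google/rembert")
+ #     model = RemBertForMaskedLM.from_pretrained("google/rembert")
+ #     text = f"The capital of France is {tokenizer.mask_token}."
+ #     inputs = tokenizer(text, return_tensors="pt")
+ #     with torch.no_grad():
+ #         logits = model(**inputs).logits
+ #     mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
+ #     predicted_id = logits[0, mask_index].argmax(dim=-1)
+ #     print(tokenizer.decode(predicted_id))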
1002
+
1003
+ @add_start_docstrings(
1004
+ """RemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", REMBERT_START_DOCSTRING
1005
+ )
1006
+ class RemBertForCausalLM(RemBertPreTrainedModel, GenerationMixin):
1007
+ _tied_weights_keys = ["cls.predictions.decoder.weight"]
1008
+
1009
+ def __init__(self, config):
1010
+ super().__init__(config)
1011
+
1012
+ if not config.is_decoder:
1013
+ logger.warning("If you want to use `RemBertForCausalLM` as a standalone, add `is_decoder=True.`")
1014
+
1015
+ self.rembert = RemBertModel(config, add_pooling_layer=False)
1016
+ self.cls = RemBertOnlyMLMHead(config)
1017
+
1018
+ # Initialize weights and apply final processing
1019
+ self.post_init()
1020
+
1021
+ def get_output_embeddings(self):
1022
+ return self.cls.predictions.decoder
1023
+
1024
+ def set_output_embeddings(self, new_embeddings):
1025
+ self.cls.predictions.decoder = new_embeddings
1026
+
1027
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1028
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1029
+ def forward(
1030
+ self,
1031
+ input_ids: torch.LongTensor = None,
1032
+ attention_mask: Optional[torch.LongTensor] = None,
1033
+ token_type_ids: Optional[torch.LongTensor] = None,
1034
+ position_ids: Optional[torch.LongTensor] = None,
1035
+ head_mask: Optional[torch.FloatTensor] = None,
1036
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1037
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1038
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1039
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1040
+ labels: Optional[torch.LongTensor] = None,
1041
+ use_cache: Optional[bool] = None,
1042
+ output_attentions: Optional[bool] = None,
1043
+ output_hidden_states: Optional[bool] = None,
1044
+ return_dict: Optional[bool] = None,
1045
+ **kwargs,
1046
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1047
+ r"""
1048
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1049
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1050
+ the model is configured as a decoder.
1051
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1052
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1053
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1054
+
1055
+ - 1 for tokens that are **not masked**,
1056
+ - 0 for tokens that are **masked**.
1057
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1058
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1059
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1060
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1061
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1062
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1063
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1064
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1065
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1066
+ use_cache (`bool`, *optional*):
1067
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1068
+ `past_key_values`).
1069
+
1070
+ Returns:
1071
+
1072
+ Example:
1073
+
1074
+ ```python
1075
+ >>> from transformers import AutoTokenizer, RemBertForCausalLM, RemBertConfig
1076
+ >>> import torch
1077
+
1078
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/rembert")
1079
+ >>> config = RemBertConfig.from_pretrained("google/rembert")
1080
+ >>> config.is_decoder = True
1081
+ >>> model = RemBertForCausalLM.from_pretrained("google/rembert", config=config)
1082
+
1083
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1084
+ >>> outputs = model(**inputs)
1085
+
1086
+ >>> prediction_logits = outputs.logits
1087
+ ```"""
1088
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1089
+
1090
+ outputs = self.rembert(
1091
+ input_ids,
1092
+ attention_mask=attention_mask,
1093
+ token_type_ids=token_type_ids,
1094
+ position_ids=position_ids,
1095
+ head_mask=head_mask,
1096
+ inputs_embeds=inputs_embeds,
1097
+ encoder_hidden_states=encoder_hidden_states,
1098
+ encoder_attention_mask=encoder_attention_mask,
1099
+ past_key_values=past_key_values,
1100
+ use_cache=use_cache,
1101
+ output_attentions=output_attentions,
1102
+ output_hidden_states=output_hidden_states,
1103
+ return_dict=return_dict,
1104
+ )
1105
+
1106
+ sequence_output = outputs[0]
1107
+ prediction_scores = self.cls(sequence_output)
1108
+
1109
+ lm_loss = None
1110
+ if labels is not None:
1111
+ lm_loss = self.loss_function(
1112
+ prediction_scores,
1113
+ labels,
1114
+ vocab_size=self.config.vocab_size,
1115
+ **kwargs,
1116
+ )
1117
+
1118
+ if not return_dict:
1119
+ output = (prediction_scores,) + outputs[2:]
1120
+ return ((lm_loss,) + output) if lm_loss is not None else output
1121
+
1122
+ return CausalLMOutputWithCrossAttentions(
1123
+ loss=lm_loss,
1124
+ logits=prediction_scores,
1125
+ past_key_values=outputs.past_key_values,
1126
+ hidden_states=outputs.hidden_states,
1127
+ attentions=outputs.attentions,
1128
+ cross_attentions=outputs.cross_attentions,
1129
+ )
1130
+
1131
+ def _reorder_cache(self, past_key_values, beam_idx):
1132
+ reordered_past = ()
1133
+ for layer_past in past_key_values:
1134
+ reordered_past += (
1135
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1136
+ + layer_past[2:],
1137
+ )
1138
+ return reordered_past
1139
+
1140
+
1141
+ @add_start_docstrings(
1142
+ """
1143
+ RemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1144
+ pooled output) e.g. for GLUE tasks.
1145
+ """,
1146
+ REMBERT_START_DOCSTRING,
1147
+ )
1148
+ class RemBertForSequenceClassification(RemBertPreTrainedModel):
1149
+ def __init__(self, config):
1150
+ super().__init__(config)
1151
+ self.num_labels = config.num_labels
1152
+ self.rembert = RemBertModel(config)
1153
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
1154
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1155
+
1156
+ # Initialize weights and apply final processing
1157
+ self.post_init()
1158
+
1159
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1160
+ @add_code_sample_docstrings(
1161
+ checkpoint="google/rembert",
1162
+ output_type=SequenceClassifierOutput,
1163
+ config_class=_CONFIG_FOR_DOC,
1164
+ )
1165
+ def forward(
1166
+ self,
1167
+ input_ids: torch.FloatTensor = None,
1168
+ attention_mask: Optional[torch.FloatTensor] = None,
1169
+ token_type_ids: Optional[torch.LongTensor] = None,
1170
+ position_ids: Optional[torch.FloatTensor] = None,
1171
+ head_mask: Optional[torch.FloatTensor] = None,
1172
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1173
+ labels: Optional[torch.LongTensor] = None,
1174
+ output_attentions: Optional[bool] = None,
1175
+ output_hidden_states: Optional[bool] = None,
1176
+ return_dict: Optional[bool] = None,
1177
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1178
+ r"""
1179
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1180
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1181
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1182
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1183
+ """
1184
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1185
+
1186
+ outputs = self.rembert(
1187
+ input_ids,
1188
+ attention_mask=attention_mask,
1189
+ token_type_ids=token_type_ids,
1190
+ position_ids=position_ids,
1191
+ head_mask=head_mask,
1192
+ inputs_embeds=inputs_embeds,
1193
+ output_attentions=output_attentions,
1194
+ output_hidden_states=output_hidden_states,
1195
+ return_dict=return_dict,
1196
+ )
1197
+
1198
+ pooled_output = outputs[1]
1199
+
1200
+ pooled_output = self.dropout(pooled_output)
1201
+ logits = self.classifier(pooled_output)
1202
+
1203
+ loss = None
1204
+ if labels is not None:
1205
+ if self.config.problem_type is None:
1206
+ if self.num_labels == 1:
1207
+ self.config.problem_type = "regression"
1208
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1209
+ self.config.problem_type = "single_label_classification"
1210
+ else:
1211
+ self.config.problem_type = "multi_label_classification"
1212
+
1213
+ if self.config.problem_type == "regression":
1214
+ loss_fct = MSELoss()
1215
+ if self.num_labels == 1:
1216
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1217
+ else:
1218
+ loss = loss_fct(logits, labels)
1219
+ elif self.config.problem_type == "single_label_classification":
1220
+ loss_fct = CrossEntropyLoss()
1221
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1222
+ elif self.config.problem_type == "multi_label_classification":
1223
+ loss_fct = BCEWithLogitsLoss()
1224
+ loss = loss_fct(logits, labels)
1225
+ if not return_dict:
1226
+ output = (logits,) + outputs[2:]
1227
+ return ((loss,) + output) if loss is not None else output
1228
+
1229
+ return SequenceClassifierOutput(
1230
+ loss=loss,
1231
+ logits=logits,
1232
+ hidden_states=outputs.hidden_states,
1233
+ attentions=outputs.attentions,
1234
+ )
1235
+
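+ # Illustrative usage sketch (commented out; not part of the upstream file): a fine-tuning-style forward pass
+ # for RemBertForSequenceClassification, passing `labels` so the loss is returned alongside the logits.
+ # `num_labels=2` here is an arbitrary choice for the example.
+ #
+ #     from transformers import AutoTokenizer, RemBertForSequenceClassification
+ #     import torch
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("google/rembert")
+ #     model = RemBertForSequenceClassification.from_pretrained("google/rembert", num_labels=2)
+ #     inputs = tokenizer("A short example sentence.", return_tensors="pt")
+ #     labels = torch.tensor([1])
+ #     outputs = model(**inputs, labels=labels)
+ #     print(outputs.loss, outputs.logits.shape)  # scalar loss, (1, num_labels)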
1236
+
1237
+ @add_start_docstrings(
1238
+ """
1239
+ RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1240
+ softmax) e.g. for RocStories/SWAG tasks.
1241
+ """,
1242
+ REMBERT_START_DOCSTRING,
1243
+ )
1244
+ class RemBertForMultipleChoice(RemBertPreTrainedModel):
1245
+ def __init__(self, config):
1246
+ super().__init__(config)
1247
+
1248
+ self.rembert = RemBertModel(config)
1249
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
1250
+ self.classifier = nn.Linear(config.hidden_size, 1)
1251
+
1252
+ # Initialize weights and apply final processing
1253
+ self.post_init()
1254
+
1255
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1256
+ @add_code_sample_docstrings(
1257
+ checkpoint="google/rembert",
1258
+ output_type=MultipleChoiceModelOutput,
1259
+ config_class=_CONFIG_FOR_DOC,
1260
+ )
1261
+ def forward(
1262
+ self,
1263
+ input_ids: torch.FloatTensor = None,
1264
+ attention_mask: Optional[torch.FloatTensor] = None,
1265
+ token_type_ids: Optional[torch.LongTensor] = None,
1266
+ position_ids: Optional[torch.FloatTensor] = None,
1267
+ head_mask: Optional[torch.FloatTensor] = None,
1268
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1269
+ labels: Optional[torch.LongTensor] = None,
1270
+ output_attentions: Optional[bool] = None,
1271
+ output_hidden_states: Optional[bool] = None,
1272
+ return_dict: Optional[bool] = None,
1273
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1274
+ r"""
1275
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1276
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1277
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1278
+ `input_ids` above)
1279
+ """
1280
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1281
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1282
+
1283
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1284
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1285
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1286
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1287
+ inputs_embeds = (
1288
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1289
+ if inputs_embeds is not None
1290
+ else None
1291
+ )
1292
+
1293
+ outputs = self.rembert(
1294
+ input_ids,
1295
+ attention_mask=attention_mask,
1296
+ token_type_ids=token_type_ids,
1297
+ position_ids=position_ids,
1298
+ head_mask=head_mask,
1299
+ inputs_embeds=inputs_embeds,
1300
+ output_attentions=output_attentions,
1301
+ output_hidden_states=output_hidden_states,
1302
+ return_dict=return_dict,
1303
+ )
1304
+
1305
+ pooled_output = outputs[1]
1306
+
1307
+ pooled_output = self.dropout(pooled_output)
1308
+ logits = self.classifier(pooled_output)
1309
+ reshaped_logits = logits.view(-1, num_choices)
1310
+
1311
+ loss = None
1312
+ if labels is not None:
1313
+ loss_fct = CrossEntropyLoss()
1314
+ loss = loss_fct(reshaped_logits, labels)
1315
+
1316
+ if not return_dict:
1317
+ output = (reshaped_logits,) + outputs[2:]
1318
+ return ((loss,) + output) if loss is not None else output
1319
+
1320
+ return MultipleChoiceModelOutput(
1321
+ loss=loss,
1322
+ logits=reshaped_logits,
1323
+ hidden_states=outputs.hidden_states,
1324
+ attentions=outputs.attentions,
1325
+ )
1326
+
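+ # Illustrative usage sketch (commented out; not part of the upstream file): RemBertForMultipleChoice expects
+ # inputs of shape (batch_size, num_choices, sequence_length); the head scores each choice and the logits are
+ # reshaped to (batch_size, num_choices), as in the forward pass above.
+ #
+ #     from transformers import AutoTokenizer, RemBertForMultipleChoice
+ #     import torch
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("google/rembert")
+ #     model = RemBertForMultipleChoice.from_pretrained("google/rembert")
+ #     prompt = "The cat sat on the"
+ #     choices = ["mat.", "sky."]
+ #     inputs = tokenizer([prompt, prompt], choices, return_tensors="pt", padding=True)
+ #     inputs = {k: v.unsqueeze(0) for k, v in inputs.items()}  # add the num_choices dimension
+ #     outputs = model(**inputs, labels=torch.tensor([0]))
+ #     print(outputs.logits)  # (1, 2)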
1327
+
1328
+ @add_start_docstrings(
1329
+ """
1330
+ RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1331
+ Named-Entity-Recognition (NER) tasks.
1332
+ """,
1333
+ REMBERT_START_DOCSTRING,
1334
+ )
1335
+ class RemBertForTokenClassification(RemBertPreTrainedModel):
1336
+ def __init__(self, config):
1337
+ super().__init__(config)
1338
+ self.num_labels = config.num_labels
1339
+
1340
+ self.rembert = RemBertModel(config, add_pooling_layer=False)
1341
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
1342
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1343
+
1344
+ # Initialize weights and apply final processing
1345
+ self.post_init()
1346
+
1347
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1348
+ @add_code_sample_docstrings(
1349
+ checkpoint="google/rembert",
1350
+ output_type=TokenClassifierOutput,
1351
+ config_class=_CONFIG_FOR_DOC,
1352
+ )
1353
+ def forward(
1354
+ self,
1355
+ input_ids: torch.FloatTensor = None,
1356
+ attention_mask: Optional[torch.FloatTensor] = None,
1357
+ token_type_ids: Optional[torch.LongTensor] = None,
1358
+ position_ids: Optional[torch.FloatTensor] = None,
1359
+ head_mask: Optional[torch.FloatTensor] = None,
1360
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1361
+ labels: Optional[torch.LongTensor] = None,
1362
+ output_attentions: Optional[bool] = None,
1363
+ output_hidden_states: Optional[bool] = None,
1364
+ return_dict: Optional[bool] = None,
1365
+ ) -> Union[Tuple, TokenClassifierOutput]:
1366
+ r"""
1367
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1368
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1369
+ """
1370
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1371
+
1372
+ outputs = self.rembert(
1373
+ input_ids,
1374
+ attention_mask=attention_mask,
1375
+ token_type_ids=token_type_ids,
1376
+ position_ids=position_ids,
1377
+ head_mask=head_mask,
1378
+ inputs_embeds=inputs_embeds,
1379
+ output_attentions=output_attentions,
1380
+ output_hidden_states=output_hidden_states,
1381
+ return_dict=return_dict,
1382
+ )
1383
+
1384
+ sequence_output = outputs[0]
1385
+
1386
+ sequence_output = self.dropout(sequence_output)
1387
+ logits = self.classifier(sequence_output)
1388
+
1389
+ loss = None
1390
+ if labels is not None:
1391
+ loss_fct = CrossEntropyLoss()
1392
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1393
+
1394
+ if not return_dict:
1395
+ output = (logits,) + outputs[2:]
1396
+ return ((loss,) + output) if loss is not None else output
1397
+
1398
+ return TokenClassifierOutput(
1399
+ loss=loss,
1400
+ logits=logits,
1401
+ hidden_states=outputs.hidden_states,
1402
+ attentions=outputs.attentions,
1403
+ )
1404
+
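+ # Illustrative usage sketch (commented out; not part of the upstream file): per-token tagging with
+ # RemBertForTokenClassification; every token in the sequence gets a logit vector over the labels.
+ # `num_labels=9` is an arbitrary choice for the example.
+ #
+ #     from transformers import AutoTokenizer, RemBertForTokenClassification
+ #     import torch
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("google/rembert")
+ #     model = RemBertForTokenClassification.from_pretrained("google/rembert", num_labels=9)
+ #     inputs = tokenizer("Ada Lovelace lived in London.", return_tensors="pt")
+ #     with torch.no_grad():
+ #         logits = model(**inputs).logits  # (1, sequence_length, num_labels)
+ #     predictions = logits.argmax(dim=-1)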
1405
+
1406
+ @add_start_docstrings(
1407
+ """
1408
+ RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1409
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1410
+ """,
1411
+ REMBERT_START_DOCSTRING,
1412
+ )
1413
+ class RemBertForQuestionAnswering(RemBertPreTrainedModel):
1414
+ def __init__(self, config):
1415
+ super().__init__(config)
1416
+
1417
+ self.num_labels = config.num_labels
1418
+
1419
+ self.rembert = RemBertModel(config, add_pooling_layer=False)
1420
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1421
+
1422
+ # Initialize weights and apply final processing
1423
+ self.post_init()
1424
+
1425
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1426
+ @add_code_sample_docstrings(
1427
+ checkpoint="google/rembert",
1428
+ output_type=QuestionAnsweringModelOutput,
1429
+ config_class=_CONFIG_FOR_DOC,
1430
+ )
1431
+ def forward(
1432
+ self,
1433
+ input_ids: torch.FloatTensor = None,
1434
+ attention_mask: Optional[torch.FloatTensor] = None,
1435
+ token_type_ids: Optional[torch.LongTensor] = None,
1436
+ position_ids: Optional[torch.FloatTensor] = None,
1437
+ head_mask: Optional[torch.FloatTensor] = None,
1438
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1439
+ start_positions: Optional[torch.LongTensor] = None,
1440
+ end_positions: Optional[torch.LongTensor] = None,
1441
+ output_attentions: Optional[bool] = None,
1442
+ output_hidden_states: Optional[bool] = None,
1443
+ return_dict: Optional[bool] = None,
1444
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1445
+ r"""
1446
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1447
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1448
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1449
+ are not taken into account for computing the loss.
1450
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1451
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1452
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1453
+ are not taken into account for computing the loss.
1454
+ """
1455
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1456
+
1457
+ outputs = self.rembert(
1458
+ input_ids,
1459
+ attention_mask=attention_mask,
1460
+ token_type_ids=token_type_ids,
1461
+ position_ids=position_ids,
1462
+ head_mask=head_mask,
1463
+ inputs_embeds=inputs_embeds,
1464
+ output_attentions=output_attentions,
1465
+ output_hidden_states=output_hidden_states,
1466
+ return_dict=return_dict,
1467
+ )
1468
+
1469
+ sequence_output = outputs[0]
1470
+
1471
+ logits = self.qa_outputs(sequence_output)
1472
+ start_logits, end_logits = logits.split(1, dim=-1)
1473
+ start_logits = start_logits.squeeze(-1)
1474
+ end_logits = end_logits.squeeze(-1)
1475
+
1476
+ total_loss = None
1477
+ if start_positions is not None and end_positions is not None:
1478
+ # If we are on multi-GPU, splitting adds a dimension
1479
+ if len(start_positions.size()) > 1:
1480
+ start_positions = start_positions.squeeze(-1)
1481
+ if len(end_positions.size()) > 1:
1482
+ end_positions = end_positions.squeeze(-1)
1483
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1484
+ ignored_index = start_logits.size(1)
1485
+ start_positions.clamp_(0, ignored_index)
1486
+ end_positions.clamp_(0, ignored_index)
1487
+
1488
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1489
+ start_loss = loss_fct(start_logits, start_positions)
1490
+ end_loss = loss_fct(end_logits, end_positions)
1491
+ total_loss = (start_loss + end_loss) / 2
1492
+
1493
+ if not return_dict:
1494
+ output = (start_logits, end_logits) + outputs[2:]
1495
+ return ((total_loss,) + output) if total_loss is not None else output
1496
+
1497
+ return QuestionAnsweringModelOutput(
1498
+ loss=total_loss,
1499
+ start_logits=start_logits,
1500
+ end_logits=end_logits,
1501
+ hidden_states=outputs.hidden_states,
1502
+ attentions=outputs.attentions,
1503
+ )
1504
+
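+ # Illustrative usage sketch (commented out; not part of the upstream file): extractive QA with
+ # RemBertForQuestionAnswering; the most likely start/end logits are decoded back to a text span.
+ #
+ #     from transformers import AutoTokenizer, RemBertForQuestionAnswering
+ #     import torch
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("google/rembert")
+ #     model = RemBertForQuestionAnswering.from_pretrained("google/rembert")
+ #     question, context = "Where do pandas live?", "Pandas live mainly in the forests of China."
+ #     inputs = tokenizer(question, context, return_tensors="pt")
+ #     with torch.no_grad():
+ #         outputs = model(**inputs)
+ #     start = outputs.start_logits.argmax()
+ #     end = outputs.end_logits.argmax()
+ #     print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))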
1505
+
1506
+ __all__ = [
1507
+ "RemBertForCausalLM",
1508
+ "RemBertForMaskedLM",
1509
+ "RemBertForMultipleChoice",
1510
+ "RemBertForQuestionAnswering",
1511
+ "RemBertForSequenceClassification",
1512
+ "RemBertForTokenClassification",
1513
+ "RemBertLayer",
1514
+ "RemBertModel",
1515
+ "RemBertPreTrainedModel",
1516
+ "load_tf_weights_in_rembert",
1517
+ ]
.venv/lib/python3.11/site-packages/transformers/models/rembert/modeling_tf_rembert.py ADDED
@@ -0,0 +1,1721 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TF 2.0 RemBERT model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ import math
20
+ from typing import Dict, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import (
27
+ TFBaseModelOutputWithPastAndCrossAttentions,
28
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
29
+ TFCausalLMOutputWithCrossAttentions,
30
+ TFMaskedLMOutput,
31
+ TFMultipleChoiceModelOutput,
32
+ TFQuestionAnsweringModelOutput,
33
+ TFSequenceClassifierOutput,
34
+ TFTokenClassifierOutput,
35
+ )
36
+ from ...modeling_tf_utils import (
37
+ TFCausalLanguageModelingLoss,
38
+ TFMaskedLanguageModelingLoss,
39
+ TFModelInputType,
40
+ TFMultipleChoiceLoss,
41
+ TFPreTrainedModel,
42
+ TFQuestionAnsweringLoss,
43
+ TFSequenceClassificationLoss,
44
+ TFTokenClassificationLoss,
45
+ get_initializer,
46
+ keras,
47
+ keras_serializable,
48
+ unpack_inputs,
49
+ )
50
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
51
+ from ...utils import (
52
+ add_code_sample_docstrings,
53
+ add_start_docstrings,
54
+ add_start_docstrings_to_model_forward,
55
+ logging,
56
+ )
57
+ from .configuration_rembert import RemBertConfig
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+ _CONFIG_FOR_DOC = "RemBertConfig"
63
+
64
+
65
+ class TFRemBertEmbeddings(keras.layers.Layer):
66
+ """Construct the embeddings from word, position and token_type embeddings."""
67
+
68
+ def __init__(self, config: RemBertConfig, **kwargs):
69
+ super().__init__(**kwargs)
70
+
71
+ self.config = config
72
+ self.input_embedding_size = config.input_embedding_size
73
+ self.max_position_embeddings = config.max_position_embeddings
74
+ self.initializer_range = config.initializer_range
75
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
76
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
77
+
78
+ def build(self, input_shape=None):
79
+ with tf.name_scope("word_embeddings"):
80
+ self.weight = self.add_weight(
81
+ name="weight",
82
+ shape=[self.config.vocab_size, self.input_embedding_size],
83
+ initializer=get_initializer(self.initializer_range),
84
+ )
85
+
86
+ with tf.name_scope("token_type_embeddings"):
87
+ self.token_type_embeddings = self.add_weight(
88
+ name="embeddings",
89
+ shape=[self.config.type_vocab_size, self.input_embedding_size],
90
+ initializer=get_initializer(self.initializer_range),
91
+ )
92
+
93
+ with tf.name_scope("position_embeddings"):
94
+ self.position_embeddings = self.add_weight(
95
+ name="embeddings",
96
+ shape=[self.max_position_embeddings, self.input_embedding_size],
97
+ initializer=get_initializer(self.initializer_range),
98
+ )
99
+
100
+ if self.built:
101
+ return
102
+ self.built = True
103
+ if getattr(self, "LayerNorm", None) is not None:
104
+ with tf.name_scope(self.LayerNorm.name):
105
+ self.LayerNorm.build([None, None, self.config.input_embedding_size])
106
+
107
+ def call(
108
+ self,
109
+ input_ids: tf.Tensor = None,
110
+ position_ids: tf.Tensor = None,
111
+ token_type_ids: tf.Tensor = None,
112
+ inputs_embeds: tf.Tensor = None,
113
+ past_key_values_length=0,
114
+ training: bool = False,
115
+ ) -> tf.Tensor:
116
+ """
117
+ Applies embedding based on the inputs tensor.
118
+
119
+ Returns:
120
+ final_embeddings (`tf.Tensor`): output embedding tensor.
121
+ """
122
+ assert not (input_ids is None and inputs_embeds is None)
123
+
124
+ if input_ids is not None:
125
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
126
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
127
+
128
+ input_shape = shape_list(inputs_embeds)[:-1]
129
+
130
+ if token_type_ids is None:
131
+ token_type_ids = tf.fill(dims=input_shape, value=0)
132
+
133
+ if position_ids is None:
134
+ position_ids = tf.expand_dims(
135
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
136
+ )
137
+
138
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
139
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
140
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
141
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
142
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
143
+
144
+ return final_embeddings
145
+
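+ # Note (illustrative sketch, commented out; not part of the upstream file): unlike BERT, the embedding layer
+ # above uses `config.input_embedding_size` rather than `config.hidden_size`, so the input embeddings can be
+ # much smaller than the Transformer width; a projection up to the hidden size happens later in the encoder.
+ # A minimal toy-config shape check, with arbitrary small sizes chosen only for the example:
+ #
+ #     import tensorflow as tf
+ #     from transformers import RemBertConfig, TFRemBertModel
+ #
+ #     config = RemBertConfig(
+ #         vocab_size=1000, input_embedding_size=128, hidden_size=256,
+ #         num_hidden_layers=2, num_attention_heads=4, intermediate_size=512,
+ #     )
+ #     model = TFRemBertModel(config)
+ #     outputs = model(tf.constant([[1, 2, 3, 4]]))
+ #     print(outputs.last_hidden_state.shape)  # (1, 4, 256) -> hidden_size, not input_embedding_size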
146
+
147
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->RemBert
148
+ class TFRemBertSelfAttention(keras.layers.Layer):
149
+ def __init__(self, config: RemBertConfig, **kwargs):
150
+ super().__init__(**kwargs)
151
+
152
+ if config.hidden_size % config.num_attention_heads != 0:
153
+ raise ValueError(
154
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
155
+ f"of attention heads ({config.num_attention_heads})"
156
+ )
157
+
158
+ self.num_attention_heads = config.num_attention_heads
159
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
160
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
161
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
162
+
163
+ self.query = keras.layers.Dense(
164
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
165
+ )
166
+ self.key = keras.layers.Dense(
167
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
168
+ )
169
+ self.value = keras.layers.Dense(
170
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
171
+ )
172
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
173
+
174
+ self.is_decoder = config.is_decoder
175
+ self.config = config
176
+
177
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
178
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
179
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
180
+
181
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
182
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
183
+
184
+ def call(
185
+ self,
186
+ hidden_states: tf.Tensor,
187
+ attention_mask: tf.Tensor,
188
+ head_mask: tf.Tensor,
189
+ encoder_hidden_states: tf.Tensor,
190
+ encoder_attention_mask: tf.Tensor,
191
+ past_key_value: Tuple[tf.Tensor],
192
+ output_attentions: bool,
193
+ training: bool = False,
194
+ ) -> Tuple[tf.Tensor]:
195
+ batch_size = shape_list(hidden_states)[0]
196
+ mixed_query_layer = self.query(inputs=hidden_states)
197
+
198
+ # If this is instantiated as a cross-attention module, the keys
199
+ # and values come from an encoder; the attention mask needs to be
200
+ # such that the encoder's padding tokens are not attended to.
201
+ is_cross_attention = encoder_hidden_states is not None
202
+
203
+ if is_cross_attention and past_key_value is not None:
204
+ # reuse k,v, cross_attentions
205
+ key_layer = past_key_value[0]
206
+ value_layer = past_key_value[1]
207
+ attention_mask = encoder_attention_mask
208
+ elif is_cross_attention:
209
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
210
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
211
+ attention_mask = encoder_attention_mask
212
+ elif past_key_value is not None:
213
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
214
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
215
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
216
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
217
+ else:
218
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
219
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
220
+
221
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
222
+
223
+ if self.is_decoder:
224
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
225
+ # Further calls to cross_attention layer can then reuse all cross-attention
226
+ # key/value_states (first "if" case)
227
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
228
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
229
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
230
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
231
+ past_key_value = (key_layer, value_layer)
232
+
233
+ # Take the dot product between "query" and "key" to get the raw attention scores.
234
+ # (batch size, num_heads, seq_len_q, seq_len_k)
235
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
236
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
237
+ attention_scores = tf.divide(attention_scores, dk)
238
+
239
+ if attention_mask is not None:
240
+ # Apply the attention mask (precomputed for all layers in the TFRemBertModel call() function)
241
+ attention_scores = tf.add(attention_scores, attention_mask)
242
+
243
+ # Normalize the attention scores to probabilities.
244
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
245
+
246
+ # This is actually dropping out entire tokens to attend to, which might
247
+ # seem a bit unusual, but is taken from the original Transformer paper.
248
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
249
+
250
+ # Mask heads if we want to
251
+ if head_mask is not None:
252
+ attention_probs = tf.multiply(attention_probs, head_mask)
253
+
254
+ attention_output = tf.matmul(attention_probs, value_layer)
255
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
256
+
257
+ # (batch_size, seq_len_q, all_head_size)
258
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
259
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
260
+
261
+ if self.is_decoder:
262
+ outputs = outputs + (past_key_value,)
263
+ return outputs
264
+
265
+ def build(self, input_shape=None):
266
+ if self.built:
267
+ return
268
+ self.built = True
269
+ if getattr(self, "query", None) is not None:
270
+ with tf.name_scope(self.query.name):
271
+ self.query.build([None, None, self.config.hidden_size])
272
+ if getattr(self, "key", None) is not None:
273
+ with tf.name_scope(self.key.name):
274
+ self.key.build([None, None, self.config.hidden_size])
275
+ if getattr(self, "value", None) is not None:
276
+ with tf.name_scope(self.value.name):
277
+ self.value.build([None, None, self.config.hidden_size])
278
+
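+ # Shape walk-through for the self-attention above (descriptive comment, not part of the upstream file):
+ # with B = batch_size, H = num_attention_heads, S = seq_length and d = attention_head_size, query/key/value
+ # are reshaped to (B, H, S, d); the scores matmul gives (B, H, S_q, S_k); the additive attention mask pushes
+ # padded positions toward large negative values; stable_softmax normalises over the key axis; and the final
+ # transpose/reshape returns (B, S_q, all_head_size) where all_head_size = H * d.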
279
+
280
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->RemBert
281
+ class TFRemBertSelfOutput(keras.layers.Layer):
282
+ def __init__(self, config: RemBertConfig, **kwargs):
283
+ super().__init__(**kwargs)
284
+
285
+ self.dense = keras.layers.Dense(
286
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
287
+ )
288
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
289
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
290
+ self.config = config
291
+
292
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
293
+ hidden_states = self.dense(inputs=hidden_states)
294
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
295
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
296
+
297
+ return hidden_states
298
+
299
+ def build(self, input_shape=None):
300
+ if self.built:
301
+ return
302
+ self.built = True
303
+ if getattr(self, "dense", None) is not None:
304
+ with tf.name_scope(self.dense.name):
305
+ self.dense.build([None, None, self.config.hidden_size])
306
+ if getattr(self, "LayerNorm", None) is not None:
307
+ with tf.name_scope(self.LayerNorm.name):
308
+ self.LayerNorm.build([None, None, self.config.hidden_size])
309
+
310
+
311
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->RemBert
312
+ class TFRemBertAttention(keras.layers.Layer):
313
+ def __init__(self, config: RemBertConfig, **kwargs):
314
+ super().__init__(**kwargs)
315
+
316
+ self.self_attention = TFRemBertSelfAttention(config, name="self")
317
+ self.dense_output = TFRemBertSelfOutput(config, name="output")
318
+
319
+ def prune_heads(self, heads):
320
+ raise NotImplementedError
321
+
322
+ def call(
323
+ self,
324
+ input_tensor: tf.Tensor,
325
+ attention_mask: tf.Tensor,
326
+ head_mask: tf.Tensor,
327
+ encoder_hidden_states: tf.Tensor,
328
+ encoder_attention_mask: tf.Tensor,
329
+ past_key_value: Tuple[tf.Tensor],
330
+ output_attentions: bool,
331
+ training: bool = False,
332
+ ) -> Tuple[tf.Tensor]:
333
+ self_outputs = self.self_attention(
334
+ hidden_states=input_tensor,
335
+ attention_mask=attention_mask,
336
+ head_mask=head_mask,
337
+ encoder_hidden_states=encoder_hidden_states,
338
+ encoder_attention_mask=encoder_attention_mask,
339
+ past_key_value=past_key_value,
340
+ output_attentions=output_attentions,
341
+ training=training,
342
+ )
343
+ attention_output = self.dense_output(
344
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
345
+ )
346
+ # add attentions (possibly with past_key_value) if we output them
347
+ outputs = (attention_output,) + self_outputs[1:]
348
+
349
+ return outputs
350
+
351
+ def build(self, input_shape=None):
352
+ if self.built:
353
+ return
354
+ self.built = True
355
+ if getattr(self, "self_attention", None) is not None:
356
+ with tf.name_scope(self.self_attention.name):
357
+ self.self_attention.build(None)
358
+ if getattr(self, "dense_output", None) is not None:
359
+ with tf.name_scope(self.dense_output.name):
360
+ self.dense_output.build(None)
361
+
362
+
363
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->RemBert
364
+ class TFRemBertIntermediate(keras.layers.Layer):
365
+ def __init__(self, config: RemBertConfig, **kwargs):
366
+ super().__init__(**kwargs)
367
+
368
+ self.dense = keras.layers.Dense(
369
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
370
+ )
371
+
372
+ if isinstance(config.hidden_act, str):
373
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
374
+ else:
375
+ self.intermediate_act_fn = config.hidden_act
376
+ self.config = config
377
+
378
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
379
+ hidden_states = self.dense(inputs=hidden_states)
380
+ hidden_states = self.intermediate_act_fn(hidden_states)
381
+
382
+ return hidden_states
383
+
384
+ def build(self, input_shape=None):
385
+ if self.built:
386
+ return
387
+ self.built = True
388
+ if getattr(self, "dense", None) is not None:
389
+ with tf.name_scope(self.dense.name):
390
+ self.dense.build([None, None, self.config.hidden_size])
391
+
392
+
393
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->RemBert
394
+ class TFRemBertOutput(keras.layers.Layer):
395
+ def __init__(self, config: RemBertConfig, **kwargs):
396
+ super().__init__(**kwargs)
397
+
398
+ self.dense = keras.layers.Dense(
399
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
400
+ )
401
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
402
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
403
+ self.config = config
404
+
405
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
406
+ hidden_states = self.dense(inputs=hidden_states)
407
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
408
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
409
+
410
+ return hidden_states
411
+
412
+ def build(self, input_shape=None):
413
+ if self.built:
414
+ return
415
+ self.built = True
416
+ if getattr(self, "dense", None) is not None:
417
+ with tf.name_scope(self.dense.name):
418
+ self.dense.build([None, None, self.config.intermediate_size])
419
+ if getattr(self, "LayerNorm", None) is not None:
420
+ with tf.name_scope(self.LayerNorm.name):
421
+ self.LayerNorm.build([None, None, self.config.hidden_size])
422
+
423
+
424
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->RemBert
425
+ class TFRemBertLayer(keras.layers.Layer):
426
+ def __init__(self, config: RemBertConfig, **kwargs):
427
+ super().__init__(**kwargs)
428
+
429
+ self.attention = TFRemBertAttention(config, name="attention")
430
+ self.is_decoder = config.is_decoder
431
+ self.add_cross_attention = config.add_cross_attention
432
+ if self.add_cross_attention:
433
+ if not self.is_decoder:
434
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
435
+ self.crossattention = TFRemBertAttention(config, name="crossattention")
436
+ self.intermediate = TFRemBertIntermediate(config, name="intermediate")
437
+ self.bert_output = TFRemBertOutput(config, name="output")
438
+
439
+ def call(
440
+ self,
441
+ hidden_states: tf.Tensor,
442
+ attention_mask: tf.Tensor,
443
+ head_mask: tf.Tensor,
444
+ encoder_hidden_states: tf.Tensor | None,
445
+ encoder_attention_mask: tf.Tensor | None,
446
+ past_key_value: Tuple[tf.Tensor] | None,
447
+ output_attentions: bool,
448
+ training: bool = False,
449
+ ) -> Tuple[tf.Tensor]:
450
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
451
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
452
+ self_attention_outputs = self.attention(
453
+ input_tensor=hidden_states,
454
+ attention_mask=attention_mask,
455
+ head_mask=head_mask,
456
+ encoder_hidden_states=None,
457
+ encoder_attention_mask=None,
458
+ past_key_value=self_attn_past_key_value,
459
+ output_attentions=output_attentions,
460
+ training=training,
461
+ )
462
+ attention_output = self_attention_outputs[0]
463
+
464
+ # if decoder, the last output is tuple of self-attn cache
465
+ if self.is_decoder:
466
+ outputs = self_attention_outputs[1:-1]
467
+ present_key_value = self_attention_outputs[-1]
468
+ else:
469
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
470
+
471
+ cross_attn_present_key_value = None
472
+ if self.is_decoder and encoder_hidden_states is not None:
473
+ if not hasattr(self, "crossattention"):
474
+ raise ValueError(
475
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
476
+ " by setting `config.add_cross_attention=True`"
477
+ )
478
+
479
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
480
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
481
+ cross_attention_outputs = self.crossattention(
482
+ input_tensor=attention_output,
483
+ attention_mask=attention_mask,
484
+ head_mask=head_mask,
485
+ encoder_hidden_states=encoder_hidden_states,
486
+ encoder_attention_mask=encoder_attention_mask,
487
+ past_key_value=cross_attn_past_key_value,
488
+ output_attentions=output_attentions,
489
+ training=training,
490
+ )
491
+ attention_output = cross_attention_outputs[0]
492
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
493
+
494
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
495
+ cross_attn_present_key_value = cross_attention_outputs[-1]
496
+ present_key_value = present_key_value + cross_attn_present_key_value
497
+
498
+ intermediate_output = self.intermediate(hidden_states=attention_output)
499
+ layer_output = self.bert_output(
500
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
501
+ )
502
+ outputs = (layer_output,) + outputs # add attentions if we output them
503
+
504
+ # if decoder, return the attn key/values as the last output
505
+ if self.is_decoder:
506
+ outputs = outputs + (present_key_value,)
507
+
508
+ return outputs
509
+
510
+ def build(self, input_shape=None):
511
+ if self.built:
512
+ return
513
+ self.built = True
514
+ if getattr(self, "attention", None) is not None:
515
+ with tf.name_scope(self.attention.name):
516
+ self.attention.build(None)
517
+ if getattr(self, "intermediate", None) is not None:
518
+ with tf.name_scope(self.intermediate.name):
519
+ self.intermediate.build(None)
520
+ if getattr(self, "bert_output", None) is not None:
521
+ with tf.name_scope(self.bert_output.name):
522
+ self.bert_output.build(None)
523
+ if getattr(self, "crossattention", None) is not None:
524
+ with tf.name_scope(self.crossattention.name):
525
+ self.crossattention.build(None)
526
+
527
+
528
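The cache layout that `TFRemBertLayer.call` expects (self-attention key/values in the first two slots, cross-attention key/values in the last two) can be checked with a small standalone sketch; all shapes below are made up for illustration:

```python
import tensorflow as tf

# Hypothetical cache shapes: (batch, heads, past_len, head_dim).
k = tf.zeros((1, 2, 5, 4))
v = tf.zeros((1, 2, 5, 4))

# Layout assumed by TFRemBertLayer.call:
past_key_value = (k, v, k, v)
self_attn_past_key_value = past_key_value[:2]    # positions 1,2: self-attention K/V
cross_attn_past_key_value = past_key_value[-2:]  # positions 3,4: cross-attention K/V
assert len(self_attn_past_key_value) == 2 and len(cross_attn_past_key_value) == 2
```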
+ class TFRemBertEncoder(keras.layers.Layer):
529
+ def __init__(self, config: RemBertConfig, **kwargs):
530
+ super().__init__(**kwargs)
531
+ self.config = config
532
+
533
+ self.embedding_hidden_mapping_in = keras.layers.Dense(
534
+ units=config.hidden_size,
535
+ kernel_initializer=get_initializer(config.initializer_range),
536
+ name="embedding_hidden_mapping_in",
537
+ )
538
+ self.layer = [TFRemBertLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)]
539
+
540
+ def call(
541
+ self,
542
+ hidden_states: tf.Tensor,
543
+ attention_mask: tf.Tensor,
544
+ head_mask: tf.Tensor,
545
+ encoder_hidden_states: tf.Tensor,
546
+ encoder_attention_mask: tf.Tensor,
547
+ past_key_values: Tuple[Tuple[tf.Tensor]],
548
+ use_cache: bool,
549
+ output_attentions: bool,
550
+ output_hidden_states: bool,
551
+ return_dict: bool,
552
+ training: bool = False,
553
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
554
+ hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states)
555
+ all_hidden_states = () if output_hidden_states else None
556
+ all_attentions = () if output_attentions else None
557
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
558
+
559
+ next_decoder_cache = () if use_cache else None
560
+ for i, layer_module in enumerate(self.layer):
561
+ if output_hidden_states:
562
+ all_hidden_states = all_hidden_states + (hidden_states,)
563
+
564
+ past_key_value = past_key_values[i] if past_key_values is not None else None
565
+
566
+ layer_outputs = layer_module(
567
+ hidden_states=hidden_states,
568
+ attention_mask=attention_mask,
569
+ head_mask=head_mask[i],
570
+ encoder_hidden_states=encoder_hidden_states,
571
+ encoder_attention_mask=encoder_attention_mask,
572
+ past_key_value=past_key_value,
573
+ output_attentions=output_attentions,
574
+ training=training,
575
+ )
576
+ hidden_states = layer_outputs[0]
577
+
578
+ if use_cache:
579
+ next_decoder_cache += (layer_outputs[-1],)
580
+
581
+ if output_attentions:
582
+ all_attentions = all_attentions + (layer_outputs[1],)
583
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
584
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
585
+
586
+ # Add last layer
587
+ if output_hidden_states:
588
+ all_hidden_states = all_hidden_states + (hidden_states,)
589
+
590
+ if not return_dict:
591
+ return tuple(
592
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
593
+ )
594
+
595
+ return TFBaseModelOutputWithPastAndCrossAttentions(
596
+ last_hidden_state=hidden_states,
597
+ past_key_values=next_decoder_cache,
598
+ hidden_states=all_hidden_states,
599
+ attentions=all_attentions,
600
+ cross_attentions=all_cross_attentions,
601
+ )
602
+
603
+ def build(self, input_shape=None):
604
+ if self.built:
605
+ return
606
+ self.built = True
607
+ if getattr(self, "embedding_hidden_mapping_in", None) is not None:
608
+ with tf.name_scope(self.embedding_hidden_mapping_in.name):
609
+ self.embedding_hidden_mapping_in.build([None, None, self.config.input_embedding_size])
610
+ if getattr(self, "layer", None) is not None:
611
+ for layer in self.layer:
612
+ with tf.name_scope(layer.name):
613
+ layer.build(None)
614
+
615
+
616
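Before the first layer runs, `TFRemBertEncoder` projects the narrower embedding output up to `hidden_size` through `embedding_hidden_mapping_in`. A minimal sketch of that shape flow, with illustrative sizes standing in for `RemBertConfig.input_embedding_size` and `RemBertConfig.hidden_size`:

```python
import tensorflow as tf

# Illustrative sizes; the real model reads them from RemBertConfig.
input_embedding_size, hidden_size = 256, 1152

# Stand-in for encoder.embedding_hidden_mapping_in.
projection = tf.keras.layers.Dense(units=hidden_size)

embeddings = tf.random.normal((2, 8, input_embedding_size))  # (batch, seq, emb)
hidden_states = projection(embeddings)
print(hidden_states.shape)  # (2, 8, 1152)
```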
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->RemBert
617
+ class TFRemBertPooler(keras.layers.Layer):
618
+ def __init__(self, config: RemBertConfig, **kwargs):
619
+ super().__init__(**kwargs)
620
+
621
+ self.dense = keras.layers.Dense(
622
+ units=config.hidden_size,
623
+ kernel_initializer=get_initializer(config.initializer_range),
624
+ activation="tanh",
625
+ name="dense",
626
+ )
627
+ self.config = config
628
+
629
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
630
+ # We "pool" the model by simply taking the hidden state corresponding
631
+ # to the first token.
632
+ first_token_tensor = hidden_states[:, 0]
633
+ pooled_output = self.dense(inputs=first_token_tensor)
634
+
635
+ return pooled_output
636
+
637
+ def build(self, input_shape=None):
638
+ if self.built:
639
+ return
640
+ self.built = True
641
+ if getattr(self, "dense", None) is not None:
642
+ with tf.name_scope(self.dense.name):
643
+ self.dense.build([None, None, self.config.hidden_size])
644
+
645
+
646
+ class TFRemBertLMPredictionHead(keras.layers.Layer):
647
+ def __init__(self, config: RemBertConfig, input_embeddings: keras.layers.Layer, **kwargs):
648
+ super().__init__(**kwargs)
649
+
650
+ self.config = config
651
+ self.initializer_range = config.initializer_range
652
+ self.output_embedding_size = config.output_embedding_size
653
+ self.dense = keras.layers.Dense(
654
+ config.output_embedding_size, kernel_initializer=get_initializer(self.initializer_range), name="dense"
655
+ )
656
+ if isinstance(config.hidden_act, str):
657
+ self.activation = get_tf_activation(config.hidden_act)
658
+ else:
659
+ self.activation = config.hidden_act
660
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
661
+
662
+ def build(self, input_shape=None):
663
+ if self.built:
664
+ return
665
+ self.built = True
666
+ self.decoder = self.add_weight(
667
+ name="decoder/weight",
668
+ shape=[self.config.vocab_size, self.output_embedding_size],
669
+ initializer=get_initializer(self.initializer_range),
670
+ )
671
+ self.decoder_bias = self.add_weight(
672
+ shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
673
+ )
674
+
675
+ if getattr(self, "dense", None) is not None:
676
+ with tf.name_scope(self.dense.name):
677
+ self.dense.build([None, None, self.config.hidden_size])
678
+ if getattr(self, "LayerNorm", None) is not None:
679
+ with tf.name_scope(self.LayerNorm.name):
680
+ self.LayerNorm.build([None, self.config.output_embedding_size])
681
+
682
+ def get_output_embeddings(self) -> keras.layers.Layer:
683
+ return self
684
+
685
+ def set_output_embeddings(self, value):
686
+ self.decoder = value
687
+ self.decoder.vocab_size = shape_list(value)[0]
688
+
689
+ def get_bias(self) -> Dict[str, tf.Variable]:
690
+ return {"decoder_bias": self.decoder_bias}
691
+
692
+ def set_bias(self, value: tf.Variable):
693
+ self.decoder_bias = value["decoder_bias"]
694
+ self.config.vocab_size = shape_list(value["decoder_bias"])[0]
695
+
696
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
697
+ hidden_states = self.dense(inputs=hidden_states)
698
+ hidden_states = self.activation(hidden_states)
699
+ seq_length = shape_list(tensor=hidden_states)[1]
700
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.output_embedding_size])
701
+ hidden_states = self.LayerNorm(hidden_states)
702
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder, transpose_b=True)
703
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
704
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias)
705
+ return hidden_states
706
+
707
+
708
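`TFRemBertLMPredictionHead.call` maps hidden states into a separate `output_embedding_size` space, then scores them against the `decoder` matrix. A shape-level sketch with small made-up sizes (the activation between `dense` and `LayerNorm` is omitted for brevity):

```python
import tensorflow as tf

# Made-up sizes for illustration; real values come from RemBertConfig.
hidden_size, output_embedding_size, vocab_size = 32, 48, 1000
batch, seq_length = 2, 8

dense = tf.keras.layers.Dense(output_embedding_size)
layer_norm = tf.keras.layers.LayerNormalization()
decoder = tf.random.normal((vocab_size, output_embedding_size))  # decoder/weight
decoder_bias = tf.zeros((vocab_size,))                           # decoder/bias

hidden_states = tf.random.normal((batch, seq_length, hidden_size))
x = layer_norm(dense(hidden_states))               # (2, 8, 48)
x = tf.reshape(x, (-1, output_embedding_size))     # (16, 48)
logits = tf.matmul(x, decoder, transpose_b=True)   # (16, 1000)
logits = tf.reshape(logits, (batch, seq_length, vocab_size)) + decoder_bias
print(logits.shape)  # (2, 8, 1000)
```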
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->RemBert
709
+ class TFRemBertMLMHead(keras.layers.Layer):
710
+ def __init__(self, config: RemBertConfig, input_embeddings: keras.layers.Layer, **kwargs):
711
+ super().__init__(**kwargs)
712
+
713
+ self.predictions = TFRemBertLMPredictionHead(config, input_embeddings, name="predictions")
714
+
715
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
716
+ prediction_scores = self.predictions(hidden_states=sequence_output)
717
+
718
+ return prediction_scores
719
+
720
+ def build(self, input_shape=None):
721
+ if self.built:
722
+ return
723
+ self.built = True
724
+ if getattr(self, "predictions", None) is not None:
725
+ with tf.name_scope(self.predictions.name):
726
+ self.predictions.build(None)
727
+
728
+
729
+ @keras_serializable
730
+ class TFRemBertMainLayer(keras.layers.Layer):
731
+ config_class = RemBertConfig
732
+
733
+ def __init__(self, config: RemBertConfig, add_pooling_layer: bool = True, **kwargs):
734
+ super().__init__(**kwargs)
735
+
736
+ self.config = config
737
+ self.is_decoder = config.is_decoder
738
+
739
+ self.embeddings = TFRemBertEmbeddings(config, name="embeddings")
740
+ self.encoder = TFRemBertEncoder(config, name="encoder")
741
+ self.pooler = TFRemBertPooler(config, name="pooler") if add_pooling_layer else None
742
+
743
+ def get_input_embeddings(self) -> keras.layers.Layer:
744
+ return self.embeddings
745
+
746
+ def set_input_embeddings(self, value: tf.Variable):
747
+ self.embeddings.weight = value
748
+ self.embeddings.vocab_size = shape_list(value)[0]
749
+
750
+ def _prune_heads(self, heads_to_prune):
751
+ """
752
+ Prunes heads of the model. `heads_to_prune`: dict of {layer_num: list of heads to prune in this layer}. See the
753
+ base class `PreTrainedModel`.
754
+ """
755
+ raise NotImplementedError
756
+
757
+ @unpack_inputs
758
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call
759
+ def call(
760
+ self,
761
+ input_ids: TFModelInputType | None = None,
762
+ attention_mask: np.ndarray | tf.Tensor | None = None,
763
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
764
+ position_ids: np.ndarray | tf.Tensor | None = None,
765
+ head_mask: np.ndarray | tf.Tensor | None = None,
766
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
767
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
768
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
769
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
770
+ use_cache: Optional[bool] = None,
771
+ output_attentions: Optional[bool] = None,
772
+ output_hidden_states: Optional[bool] = None,
773
+ return_dict: Optional[bool] = None,
774
+ training: bool = False,
775
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
776
+ if not self.config.is_decoder:
777
+ use_cache = False
778
+
779
+ if input_ids is not None and inputs_embeds is not None:
780
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
781
+ elif input_ids is not None:
782
+ input_shape = shape_list(input_ids)
783
+ elif inputs_embeds is not None:
784
+ input_shape = shape_list(inputs_embeds)[:-1]
785
+ else:
786
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
787
+
788
+ batch_size, seq_length = input_shape
789
+
790
+ if past_key_values is None:
791
+ past_key_values_length = 0
792
+ past_key_values = [None] * len(self.encoder.layer)
793
+ else:
794
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
795
+
796
+ if attention_mask is None:
797
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
798
+
799
+ if token_type_ids is None:
800
+ token_type_ids = tf.fill(dims=input_shape, value=0)
801
+
802
+ embedding_output = self.embeddings(
803
+ input_ids=input_ids,
804
+ position_ids=position_ids,
805
+ token_type_ids=token_type_ids,
806
+ inputs_embeds=inputs_embeds,
807
+ past_key_values_length=past_key_values_length,
808
+ training=training,
809
+ )
810
+
811
+ # We create a 3D attention mask from a 2D tensor mask.
812
+ # Sizes are [batch_size, 1, 1, to_seq_length]
813
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
814
+ # this attention mask is simpler than the triangular masking of causal attention
815
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
816
+ attention_mask_shape = shape_list(attention_mask)
817
+
818
+ mask_seq_length = seq_length + past_key_values_length
819
+ # Copied from `modeling_tf_t5.py`
820
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
821
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
822
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
823
+ if self.is_decoder:
824
+ seq_ids = tf.range(mask_seq_length)
825
+ causal_mask = tf.less_equal(
826
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
827
+ seq_ids[None, :, None],
828
+ )
829
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
830
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
831
+ attention_mask_shape = shape_list(extended_attention_mask)
832
+ extended_attention_mask = tf.reshape(
833
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
834
+ )
835
+ if past_key_values[0] is not None:
836
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
837
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
838
+ else:
839
+ extended_attention_mask = tf.reshape(
840
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
841
+ )
842
+
843
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
844
+ # masked positions, this operation will create a tensor which is 0.0 for
845
+ # positions we want to attend and -10000.0 for masked positions.
846
+ # Since we are adding it to the raw scores before the softmax, this is
847
+ # effectively the same as removing these entirely.
848
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
849
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
850
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
851
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
852
+
853
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
854
+ if self.is_decoder and encoder_attention_mask is not None:
855
+ # If a 2D or 3D attention mask is provided for the cross-attention,
856
+ # we need to make it broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
857
+ # (here mask_seq_length = seq_length + past_key_values_length).
858
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
859
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
860
+ if num_dims_encoder_attention_mask == 3:
861
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
862
+ if num_dims_encoder_attention_mask == 2:
863
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
864
+
865
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
866
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
867
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
868
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
869
+
870
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
871
+ else:
872
+ encoder_extended_attention_mask = None
873
+
874
+ # Prepare head mask if needed
875
+ # 1.0 in head_mask indicate we keep the head
876
+ # attention_probs has shape bsz x n_heads x N x N
877
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
878
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
879
+ if head_mask is not None:
880
+ raise NotImplementedError
881
+ else:
882
+ head_mask = [None] * self.config.num_hidden_layers
883
+
884
+ encoder_outputs = self.encoder(
885
+ hidden_states=embedding_output,
886
+ attention_mask=extended_attention_mask,
887
+ head_mask=head_mask,
888
+ encoder_hidden_states=encoder_hidden_states,
889
+ encoder_attention_mask=encoder_extended_attention_mask,
890
+ past_key_values=past_key_values,
891
+ use_cache=use_cache,
892
+ output_attentions=output_attentions,
893
+ output_hidden_states=output_hidden_states,
894
+ return_dict=return_dict,
895
+ training=training,
896
+ )
897
+
898
+ sequence_output = encoder_outputs[0]
899
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
900
+
901
+ if not return_dict:
902
+ return (
903
+ sequence_output,
904
+ pooled_output,
905
+ ) + encoder_outputs[1:]
906
+
907
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
908
+ last_hidden_state=sequence_output,
909
+ pooler_output=pooled_output,
910
+ past_key_values=encoder_outputs.past_key_values,
911
+ hidden_states=encoder_outputs.hidden_states,
912
+ attentions=encoder_outputs.attentions,
913
+ cross_attentions=encoder_outputs.cross_attentions,
914
+ )
915
+
916
+ def build(self, input_shape=None):
917
+ if self.built:
918
+ return
919
+ self.built = True
920
+ if getattr(self, "embeddings", None) is not None:
921
+ with tf.name_scope(self.embeddings.name):
922
+ self.embeddings.build(None)
923
+ if getattr(self, "encoder", None) is not None:
924
+ with tf.name_scope(self.encoder.name):
925
+ self.encoder.build(None)
926
+ if getattr(self, "pooler", None) is not None:
927
+ with tf.name_scope(self.pooler.name):
928
+ self.pooler.build(None)
929
+
930
+
931
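The `(one_cst - mask) * -10000` construction in `TFRemBertMainLayer.call` turns a 0/1 padding mask into an additive bias on the attention scores. In isolation:

```python
import tensorflow as tf

# A 0/1 padding mask becomes a large negative additive bias:
# ~0 where we attend, -10000 where we do not.
attention_mask = tf.constant([[1.0, 1.0, 1.0, 0.0]])  # (batch, seq)
extended = attention_mask[:, None, None, :]           # (batch, 1, 1, seq)
extended = (1.0 - extended) * -10000.0
print(extended.numpy())  # -0. at attended positions, -10000. at the padded one
```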
+ class TFRemBertPreTrainedModel(TFPreTrainedModel):
932
+ """
933
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
934
+ models.
935
+ """
936
+
937
+ config_class = RemBertConfig
938
+ base_model_prefix = "rembert"
939
+
940
+
941
+ REMBERT_START_DOCSTRING = r"""
942
+
943
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
944
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
945
+ etc.)
946
+
947
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
948
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
949
+ behavior.
950
+
951
+ <Tip>
952
+
953
+ TensorFlow models and layers in `transformers` accept two formats as input:
954
+
955
+ - having all inputs as keyword arguments (like PyTorch models), or
956
+ - having all inputs as a list, tuple or dict in the first positional argument.
957
+
958
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
959
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
960
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
961
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
962
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
963
+ positional argument:
964
+
965
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
966
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
967
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
968
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
969
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
970
+
971
+ Note that when creating models and layers with
972
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
973
+ about any of this, as you can just pass inputs like you would to any other Python function!
974
+
975
+ </Tip>
976
+
977
+ Args:
978
+ config ([`RemBertConfig`]): Model configuration class with all the parameters of the model.
979
+ Initializing with a config file does not load the weights associated with the model, only the
980
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
981
+ """
982
+
983
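The three input formats listed in the tip above look like this in practice; the sketch assumes the `google/rembert` checkpoint referenced by the code-sample decorators can be downloaded:

```python
from transformers import AutoTokenizer, TFRemBertModel

# Assumes network access to the "google/rembert" checkpoint.
tokenizer = AutoTokenizer.from_pretrained("google/rembert")
model = TFRemBertModel.from_pretrained("google/rembert")
enc = tokenizer("Hello world", return_tensors="tf")

out1 = model(enc["input_ids"])                           # single tensor
out2 = model([enc["input_ids"], enc["attention_mask"]])  # list, in docstring order
out3 = model({"input_ids": enc["input_ids"],             # dict keyed by input names
              "attention_mask": enc["attention_mask"]})
```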
+ REMBERT_INPUTS_DOCSTRING = r"""
984
+ Args:
985
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
986
+ Indices of input sequence tokens in the vocabulary.
987
+
988
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
989
+ [`PreTrainedTokenizer.encode`] for details.
990
+
991
+ [What are input IDs?](../glossary#input-ids)
992
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
993
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
994
+
995
+ - 1 for tokens that are **not masked**,
996
+ - 0 for tokens that are **masked**.
997
+
998
+ [What are attention masks?](../glossary#attention-mask)
999
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1000
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1001
+ 1]`:
1002
+
1003
+ - 0 corresponds to a *sentence A* token,
1004
+ - 1 corresponds to a *sentence B* token.
1005
+
1006
+ [What are token type IDs?](../glossary#token-type-ids)
1007
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1008
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1009
+ config.max_position_embeddings - 1]`.
1010
+
1011
+ [What are position IDs?](../glossary#position-ids)
1012
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1013
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1014
+
1015
+ - 1 indicates the head is **not masked**,
1016
+ - 0 indicates the head is **masked**.
1017
+
1018
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1019
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1020
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1021
+ model's internal embedding lookup matrix.
1022
+ output_attentions (`bool`, *optional*):
1023
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1024
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1025
+ config will be used instead.
1026
+ output_hidden_states (`bool`, *optional*):
1027
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1028
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1029
+ used instead.
1030
+ return_dict (`bool`, *optional*):
1031
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1032
+ eager mode, in graph mode the value will always be set to True.
1033
+ training (`bool`, *optional*, defaults to `False`):
1034
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1035
+ behaviors between training and evaluation).
1036
+ """
1037
+
1038
+
1039
+ @add_start_docstrings(
1040
+ "The bare RemBERT Model transformer outputing raw hidden-states without any specific head on top.",
1041
+ REMBERT_START_DOCSTRING,
1042
+ )
1043
+ class TFRemBertModel(TFRemBertPreTrainedModel):
1044
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1045
+ super().__init__(config, *inputs, **kwargs)
1046
+
1047
+ self.rembert = TFRemBertMainLayer(config, name="rembert")
1048
+
1049
+ @unpack_inputs
1050
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1051
+ @add_code_sample_docstrings(
1052
+ checkpoint="google/rembert",
1053
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
1054
+ config_class=_CONFIG_FOR_DOC,
1055
+ )
1056
+ def call(
1057
+ self,
1058
+ input_ids: TFModelInputType | None = None,
1059
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1060
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1061
+ position_ids: np.ndarray | tf.Tensor | None = None,
1062
+ head_mask: np.ndarray | tf.Tensor | None = None,
1063
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1064
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1065
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1066
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1067
+ use_cache: Optional[bool] = None,
1068
+ output_attentions: Optional[bool] = None,
1069
+ output_hidden_states: Optional[bool] = None,
1070
+ return_dict: Optional[bool] = None,
1071
+ training: Optional[bool] = False,
1072
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
1073
+ r"""
1074
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1075
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1076
+ the model is configured as a decoder.
1077
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1078
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1079
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1080
+
1081
+ - 1 for tokens that are **not masked**,
1082
+ - 0 for tokens that are **masked**.
1083
+
1084
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1085
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1086
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1087
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1088
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1089
+ use_cache (`bool`, *optional*, defaults to `True`):
1090
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1091
+ `past_key_values`). Set to `False` during training, `True` during generation.
1092
+ """
1093
+ outputs = self.rembert(
1094
+ input_ids=input_ids,
1095
+ attention_mask=attention_mask,
1096
+ token_type_ids=token_type_ids,
1097
+ position_ids=position_ids,
1098
+ head_mask=head_mask,
1099
+ inputs_embeds=inputs_embeds,
1100
+ encoder_hidden_states=encoder_hidden_states,
1101
+ encoder_attention_mask=encoder_attention_mask,
1102
+ past_key_values=past_key_values,
1103
+ use_cache=use_cache,
1104
+ output_attentions=output_attentions,
1105
+ output_hidden_states=output_hidden_states,
1106
+ return_dict=return_dict,
1107
+ training=training,
1108
+ )
1109
+
1110
+ return outputs
1111
+
1112
+ def build(self, input_shape=None):
1113
+ if self.built:
1114
+ return
1115
+ self.built = True
1116
+ if getattr(self, "rembert", None) is not None:
1117
+ with tf.name_scope(self.rembert.name):
1118
+ self.rembert.build(None)
1119
+
1120
+
1121
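A short usage sketch for the bare model, again assuming the `google/rembert` checkpoint is reachable:

```python
from transformers import AutoTokenizer, TFRemBertModel

tokenizer = AutoTokenizer.from_pretrained("google/rembert")
model = TFRemBertModel.from_pretrained("google/rembert")

inputs = tokenizer("RemBERT rescales its embeddings.", return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)
print(outputs.pooler_output.shape)      # (1, hidden_size)
```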
+ @add_start_docstrings("""RemBERT Model with a `language modeling` head on top.""", REMBERT_START_DOCSTRING)
1122
+ class TFRemBertForMaskedLM(TFRemBertPreTrainedModel, TFMaskedLanguageModelingLoss):
1123
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1124
+ super().__init__(config, *inputs, **kwargs)
1125
+
1126
+ if config.is_decoder:
1127
+ logger.warning(
1128
+ "If you want to use `TFRemBertForMaskedLM` make sure `config.is_decoder=False` for "
1129
+ "bi-directional self-attention."
1130
+ )
1131
+
1132
+ self.rembert = TFRemBertMainLayer(config, name="rembert", add_pooling_layer=False)
1133
+ self.mlm = TFRemBertMLMHead(config, input_embeddings=self.rembert.embeddings, name="mlm___cls")
1134
+
1135
+ def get_lm_head(self) -> keras.layers.Layer:
1136
+ return self.mlm.predictions
1137
+
1138
+ @unpack_inputs
1139
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1140
+ @add_code_sample_docstrings(
1141
+ checkpoint="google/rembert",
1142
+ output_type=TFMaskedLMOutput,
1143
+ config_class=_CONFIG_FOR_DOC,
1144
+ )
1145
+ def call(
1146
+ self,
1147
+ input_ids: TFModelInputType | None = None,
1148
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1149
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1150
+ position_ids: np.ndarray | tf.Tensor | None = None,
1151
+ head_mask: np.ndarray | tf.Tensor | None = None,
1152
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1153
+ output_attentions: Optional[bool] = None,
1154
+ output_hidden_states: Optional[bool] = None,
1155
+ return_dict: Optional[bool] = None,
1156
+ labels: np.ndarray | tf.Tensor | None = None,
1157
+ training: Optional[bool] = False,
1158
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1159
+ r"""
1160
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1161
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1162
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1163
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1164
+ """
1165
+ outputs = self.rembert(
1166
+ input_ids=input_ids,
1167
+ attention_mask=attention_mask,
1168
+ token_type_ids=token_type_ids,
1169
+ position_ids=position_ids,
1170
+ head_mask=head_mask,
1171
+ inputs_embeds=inputs_embeds,
1172
+ output_attentions=output_attentions,
1173
+ output_hidden_states=output_hidden_states,
1174
+ return_dict=return_dict,
1175
+ training=training,
1176
+ )
1177
+ sequence_output = outputs[0]
1178
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1179
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1180
+
1181
+ if not return_dict:
1182
+ output = (prediction_scores,) + outputs[2:]
1183
+ return ((loss,) + output) if loss is not None else output
1184
+
1185
+ return TFMaskedLMOutput(
1186
+ loss=loss,
1187
+ logits=prediction_scores,
1188
+ hidden_states=outputs.hidden_states,
1189
+ attentions=outputs.attentions,
1190
+ )
1191
+
1192
+ def build(self, input_shape=None):
1193
+ if self.built:
1194
+ return
1195
+ self.built = True
1196
+ if getattr(self, "rembert", None) is not None:
1197
+ with tf.name_scope(self.rembert.name):
1198
+ self.rembert.build(None)
1199
+ if getattr(self, "mlm", None) is not None:
1200
+ with tf.name_scope(self.mlm.name):
1201
+ self.mlm.build(None)
1202
+
1203
+
1204
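For the masked-LM head, a typical workflow is to insert the mask token and read off the highest-scoring vocabulary entry at that position (when training, labels use `-100` at positions that should not contribute to the loss). A sketch under the same checkpoint assumption:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRemBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/rembert")
model = TFRemBertForMaskedLM.from_pretrained("google/rembert")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
logits = model(**inputs).logits

# Find the mask position and decode the top-scoring token.
mask_index = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0]
predicted_id = int(tf.argmax(logits[0, mask_index]))
print(tokenizer.decode([predicted_id]))
```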
+ @add_start_docstrings(
1205
+ """RemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", REMBERT_START_DOCSTRING
1206
+ )
1207
+ class TFRemBertForCausalLM(TFRemBertPreTrainedModel, TFCausalLanguageModelingLoss):
1208
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1209
+ super().__init__(config, *inputs, **kwargs)
1210
+
1211
+ if not config.is_decoder:
1212
+ logger.warning("If you want to use `TFRemBertForCausalLM` as a standalone, add `is_decoder=True.`")
1213
+
1214
+ self.rembert = TFRemBertMainLayer(config, name="rembert", add_pooling_layer=False)
1215
+ self.mlm = TFRemBertMLMHead(config, input_embeddings=self.rembert.embeddings, name="mlm___cls")
1216
+
1217
+ def get_lm_head(self) -> keras.layers.Layer:
1218
+ return self.mlm.predictions
1219
+
1220
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation
1221
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1222
+ input_shape = input_ids.shape
1223
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1224
+ if attention_mask is None:
1225
+ attention_mask = tf.ones(input_shape)
1226
+
1227
+ # cut decoder_input_ids if past is used
1228
+ if past_key_values is not None:
1229
+ input_ids = input_ids[:, -1:]
1230
+
1231
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1232
+
1233
+ @unpack_inputs
1234
+ @add_code_sample_docstrings(
1235
+ checkpoint="google/rembert",
1236
+ output_type=TFCausalLMOutputWithCrossAttentions,
1237
+ config_class=_CONFIG_FOR_DOC,
1238
+ )
1239
+ def call(
1240
+ self,
1241
+ input_ids: TFModelInputType | None = None,
1242
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1243
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1244
+ position_ids: np.ndarray | tf.Tensor | None = None,
1245
+ head_mask: np.ndarray | tf.Tensor | None = None,
1246
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1247
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1248
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1249
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1250
+ use_cache: Optional[bool] = None,
1251
+ output_attentions: Optional[bool] = None,
1252
+ output_hidden_states: Optional[bool] = None,
1253
+ return_dict: Optional[bool] = None,
1254
+ labels: np.ndarray | tf.Tensor | None = None,
1255
+ training: Optional[bool] = False,
1256
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
1257
+ r"""
1258
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1259
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1260
+ the model is configured as a decoder.
1261
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1262
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1263
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1264
+
1265
+ - 1 for tokens that are **not masked**,
1266
+ - 0 for tokens that are **masked**.
1267
+
1268
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1269
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1270
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1271
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1272
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1273
+ use_cache (`bool`, *optional*, defaults to `True`):
1274
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1275
+ `past_key_values`). Set to `False` during training, `True` during generation.
1276
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1277
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1278
+ config.vocab_size - 1]`.
1279
+ """
1280
+ outputs = self.rembert(
1281
+ input_ids=input_ids,
1282
+ attention_mask=attention_mask,
1283
+ token_type_ids=token_type_ids,
1284
+ position_ids=position_ids,
1285
+ head_mask=head_mask,
1286
+ inputs_embeds=inputs_embeds,
1287
+ encoder_hidden_states=encoder_hidden_states,
1288
+ encoder_attention_mask=encoder_attention_mask,
1289
+ past_key_values=past_key_values,
1290
+ use_cache=use_cache,
1291
+ output_attentions=output_attentions,
1292
+ output_hidden_states=output_hidden_states,
1293
+ return_dict=return_dict,
1294
+ training=training,
1295
+ )
1296
+ sequence_output = outputs[0]
1297
+ logits = self.mlm(sequence_output=sequence_output, training=training)
1298
+ loss = None
1299
+
1300
+ if labels is not None:
1301
+ # shift labels to the left and cut last logit token
1302
+ shifted_logits = logits[:, :-1]
1303
+ labels = labels[:, 1:]
1304
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
1305
+
1306
+ if not return_dict:
1307
+ output = (logits,) + outputs[2:]
1308
+ return ((loss,) + output) if loss is not None else output
1309
+
1310
+ return TFCausalLMOutputWithCrossAttentions(
1311
+ loss=loss,
1312
+ logits=logits,
1313
+ past_key_values=outputs.past_key_values,
1314
+ hidden_states=outputs.hidden_states,
1315
+ attentions=outputs.attentions,
1316
+ cross_attentions=outputs.cross_attentions,
1317
+ )
1318
+
1319
+ def build(self, input_shape=None):
1320
+ if self.built:
1321
+ return
1322
+ self.built = True
1323
+ if getattr(self, "rembert", None) is not None:
1324
+ with tf.name_scope(self.rembert.name):
1325
+ self.rembert.build(None)
1326
+ if getattr(self, "mlm", None) is not None:
1327
+ with tf.name_scope(self.mlm.name):
1328
+ self.mlm.build(None)
1329
+
1330
+
1331
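The label shifting in `TFRemBertForCausalLM.call` pairs each position's logits with the token that follows it. A self-contained illustration of that alignment:

```python
import tensorflow as tf

logits = tf.random.normal((1, 5, 10))   # (batch, seq, vocab), sizes made up
labels = tf.constant([[3, 1, 4, 1, 5]])

shifted_logits = logits[:, :-1]  # positions 1..4 predict tokens 2..5
shifted_labels = labels[:, 1:]   # the tokens 2..5 themselves
loss = tf.keras.losses.sparse_categorical_crossentropy(
    shifted_labels, shifted_logits, from_logits=True
)
print(loss.shape)  # (1, 4): one term per predicted position
```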
+ @add_start_docstrings(
1332
+ """
1333
+ RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
1334
+ """,
1335
+ REMBERT_START_DOCSTRING,
1336
+ )
1337
+ class TFRemBertForSequenceClassification(TFRemBertPreTrainedModel, TFSequenceClassificationLoss):
1338
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1339
+ super().__init__(config, *inputs, **kwargs)
1340
+
1341
+ self.num_labels = config.num_labels
1342
+
1343
+ self.rembert = TFRemBertMainLayer(config, name="rembert")
1344
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
1345
+ self.classifier = keras.layers.Dense(
1346
+ units=config.num_labels,
1347
+ kernel_initializer=get_initializer(config.initializer_range),
1348
+ name="classifier",
1349
+ )
1350
+ self.config = config
1351
+
1352
+ @unpack_inputs
1353
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1354
+ @add_code_sample_docstrings(
1355
+ checkpoint="google/rembert",
1356
+ output_type=TFSequenceClassifierOutput,
1357
+ config_class=_CONFIG_FOR_DOC,
1358
+ )
1359
+ def call(
1360
+ self,
1361
+ input_ids: TFModelInputType | None = None,
1362
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1363
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1364
+ position_ids: np.ndarray | tf.Tensor | None = None,
1365
+ head_mask: np.ndarray | tf.Tensor | None = None,
1366
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1367
+ output_attentions: Optional[bool] = None,
1368
+ output_hidden_states: Optional[bool] = None,
1369
+ return_dict: Optional[bool] = None,
1370
+ labels: np.ndarray | tf.Tensor | None = None,
1371
+ training: Optional[bool] = False,
1372
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1373
+ r"""
1374
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1375
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1376
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1377
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1378
+ """
1379
+ outputs = self.rembert(
1380
+ input_ids=input_ids,
1381
+ attention_mask=attention_mask,
1382
+ token_type_ids=token_type_ids,
1383
+ position_ids=position_ids,
1384
+ head_mask=head_mask,
1385
+ inputs_embeds=inputs_embeds,
1386
+ output_attentions=output_attentions,
1387
+ output_hidden_states=output_hidden_states,
1388
+ return_dict=return_dict,
1389
+ training=training,
1390
+ )
1391
+ pooled_output = outputs[1]
1392
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1393
+ logits = self.classifier(inputs=pooled_output)
1394
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1395
+
1396
+ if not return_dict:
1397
+ output = (logits,) + outputs[2:]
1398
+ return ((loss,) + output) if loss is not None else output
1399
+
1400
+ return TFSequenceClassifierOutput(
1401
+ loss=loss,
1402
+ logits=logits,
1403
+ hidden_states=outputs.hidden_states,
1404
+ attentions=outputs.attentions,
1405
+ )
1406
+
1407
+ def build(self, input_shape=None):
1408
+ if self.built:
1409
+ return
1410
+ self.built = True
1411
+ if getattr(self, "rembert", None) is not None:
1412
+ with tf.name_scope(self.rembert.name):
1413
+ self.rembert.build(None)
1414
+ if getattr(self, "classifier", None) is not None:
1415
+ with tf.name_scope(self.classifier.name):
1416
+ self.classifier.build([None, None, self.config.hidden_size])
1417
+
1418
+
1419
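Per the `labels` docstring, `num_labels == 1` makes the head a regressor (mean-squared error) while `num_labels > 1` uses cross-entropy. A classification sketch, checkpoint availability assumed:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRemBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("google/rembert")
model = TFRemBertForSequenceClassification.from_pretrained(
    "google/rembert", num_labels=3  # 3 classes -> cross-entropy loss
)

inputs = tokenizer("A three-way classification example.", return_tensors="tf")
outputs = model(**inputs, labels=tf.constant([2]))
print(outputs.logits.shape)  # (1, 3)
print(float(outputs.loss))
```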
+ @add_start_docstrings(
1420
+ """
1421
+ RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1422
+ softmax) e.g. for RocStories/SWAG tasks.
1423
+ """,
1424
+ REMBERT_START_DOCSTRING,
1425
+ )
1426
+ class TFRemBertForMultipleChoice(TFRemBertPreTrainedModel, TFMultipleChoiceLoss):
1427
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1428
+ super().__init__(config, *inputs, **kwargs)
1429
+
1430
+ self.rembert = TFRemBertMainLayer(config, name="rembert")
1431
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
1432
+ self.classifier = keras.layers.Dense(
1433
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1434
+ )
1435
+ self.config = config
1436
+
1437
+ @unpack_inputs
1438
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1439
+ @add_code_sample_docstrings(
1440
+ checkpoint="google/rembert",
1441
+ output_type=TFMultipleChoiceModelOutput,
1442
+ config_class=_CONFIG_FOR_DOC,
1443
+ )
1444
+ def call(
1445
+ self,
1446
+ input_ids: TFModelInputType | None = None,
1447
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1448
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1449
+ position_ids: np.ndarray | tf.Tensor | None = None,
1450
+ head_mask: np.ndarray | tf.Tensor | None = None,
1451
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1452
+ output_attentions: Optional[bool] = None,
1453
+ output_hidden_states: Optional[bool] = None,
1454
+ return_dict: Optional[bool] = None,
1455
+ labels: np.ndarray | tf.Tensor | None = None,
1456
+ training: Optional[bool] = False,
1457
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1458
+ r"""
1459
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1460
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
1461
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1462
+ """
1463
+
1464
+ if input_ids is not None:
1465
+ num_choices = shape_list(input_ids)[1]
1466
+ seq_length = shape_list(input_ids)[2]
1467
+ else:
1468
+ num_choices = shape_list(inputs_embeds)[1]
1469
+ seq_length = shape_list(inputs_embeds)[2]
1470
+
1471
+ flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
1472
+ flat_attention_mask = (
1473
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
1474
+ )
1475
+ flat_token_type_ids = (
1476
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
1477
+ )
1478
+ flat_position_ids = (
1479
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
1480
+ )
1481
+ flat_inputs_embeds = (
1482
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
1483
+ if inputs_embeds is not None
1484
+ else None
1485
+ )
1486
+ outputs = self.rembert(
1487
+ input_ids=flat_input_ids,
1488
+ attention_mask=flat_attention_mask,
1489
+ token_type_ids=flat_token_type_ids,
1490
+ position_ids=flat_position_ids,
1491
+ head_mask=head_mask,
1492
+ inputs_embeds=flat_inputs_embeds,
1493
+ output_attentions=output_attentions,
1494
+ output_hidden_states=output_hidden_states,
1495
+ return_dict=return_dict,
1496
+ training=training,
1497
+ )
1498
+ pooled_output = outputs[1]
1499
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1500
+ logits = self.classifier(inputs=pooled_output)
1501
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
1502
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
1503
+
1504
+ if not return_dict:
1505
+ output = (reshaped_logits,) + outputs[2:]
1506
+ return ((loss,) + output) if loss is not None else output
1507
+
1508
+ return TFMultipleChoiceModelOutput(
1509
+ loss=loss,
1510
+ logits=reshaped_logits,
1511
+ hidden_states=outputs.hidden_states,
1512
+ attentions=outputs.attentions,
1513
+ )
1514
+
1515
+ def build(self, input_shape=None):
1516
+ if self.built:
1517
+ return
1518
+ self.built = True
1519
+ if getattr(self, "rembert", None) is not None:
1520
+ with tf.name_scope(self.rembert.name):
1521
+ self.rembert.build(None)
1522
+ if getattr(self, "classifier", None) is not None:
1523
+ with tf.name_scope(self.classifier.name):
1524
+ self.classifier.build([None, None, self.config.hidden_size])
1525
+
1526
+
1527
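The reshape sequence in `TFRemBertForMultipleChoice.call` (flatten the choices into the batch, score each one, then fold the scores back) in isolation:

```python
import tensorflow as tf

batch, num_choices, seq_length = 2, 4, 16
input_ids = tf.zeros((batch, num_choices, seq_length), dtype=tf.int32)

flat_input_ids = tf.reshape(input_ids, (-1, seq_length))  # (8, 16): choices join the batch
# ... encoder + classifier would run here, producing one score per flattened row ...
logits = tf.zeros((batch * num_choices, 1))
reshaped_logits = tf.reshape(logits, (-1, num_choices))   # (2, 4): softmax over choices
print(reshaped_logits.shape)
```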
+ @add_start_docstrings(
1528
+ """
1529
+ RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1530
+ Named-Entity-Recognition (NER) tasks.
1531
+ """,
1532
+ REMBERT_START_DOCSTRING,
1533
+ )
1534
+ class TFRemBertForTokenClassification(TFRemBertPreTrainedModel, TFTokenClassificationLoss):
1535
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1536
+ super().__init__(config, *inputs, **kwargs)
1537
+
1538
+ self.num_labels = config.num_labels
1539
+
1540
+ self.rembert = TFRemBertMainLayer(config, name="rembert", add_pooling_layer=False)
1541
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1542
+ self.classifier = keras.layers.Dense(
1543
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1544
+ )
1545
+ self.config = config
1546
+
1547
+ @unpack_inputs
1548
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1549
+ @add_code_sample_docstrings(
1550
+ checkpoint="google/rembert",
1551
+ output_type=TFTokenClassifierOutput,
1552
+ config_class=_CONFIG_FOR_DOC,
1553
+ )
1554
+ def call(
1555
+ self,
1556
+ input_ids: TFModelInputType | None = None,
1557
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1558
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1559
+ position_ids: np.ndarray | tf.Tensor | None = None,
1560
+ head_mask: np.ndarray | tf.Tensor | None = None,
1561
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1562
+ output_attentions: Optional[bool] = None,
1563
+ output_hidden_states: Optional[bool] = None,
1564
+ return_dict: Optional[bool] = None,
1565
+ labels: np.ndarray | tf.Tensor | None = None,
1566
+ training: Optional[bool] = False,
1567
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1568
+ r"""
1569
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1570
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1571
+ """
1572
+ outputs = self.rembert(
1573
+ input_ids=input_ids,
1574
+ attention_mask=attention_mask,
1575
+ token_type_ids=token_type_ids,
1576
+ position_ids=position_ids,
1577
+ head_mask=head_mask,
1578
+ inputs_embeds=inputs_embeds,
1579
+ output_attentions=output_attentions,
1580
+ output_hidden_states=output_hidden_states,
1581
+ return_dict=return_dict,
1582
+ training=training,
1583
+ )
1584
+ sequence_output = outputs[0]
1585
+ sequence_output = self.dropout(inputs=sequence_output, training=training)
1586
+ logits = self.classifier(inputs=sequence_output)
1587
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1588
+
1589
+ if not return_dict:
1590
+ output = (logits,) + outputs[1:]
1591
+ return ((loss,) + output) if loss is not None else output
1592
+
1593
+ return TFTokenClassifierOutput(
1594
+ loss=loss,
1595
+ logits=logits,
1596
+ hidden_states=outputs.hidden_states,
1597
+ attentions=outputs.attentions,
1598
+ )
1599
+
1600
+ def build(self, input_shape=None):
1601
+ if self.built:
1602
+ return
1603
+ self.built = True
1604
+ if getattr(self, "rembert", None) is not None:
1605
+ with tf.name_scope(self.rembert.name):
1606
+ self.rembert.build(None)
1607
+ if getattr(self, "classifier", None) is not None:
1608
+ with tf.name_scope(self.classifier.name):
1609
+ self.classifier.build([None, None, self.config.hidden_size])
1610
+
1611
+
1612
+ @add_start_docstrings(
1613
+ """
1614
+ RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1615
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1616
+ """,
1617
+ REMBERT_START_DOCSTRING,
1618
+ )
1619
+ class TFRemBertForQuestionAnswering(TFRemBertPreTrainedModel, TFQuestionAnsweringLoss):
1620
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1621
+ super().__init__(config, *inputs, **kwargs)
1622
+
1623
+ self.num_labels = config.num_labels
1624
+
1625
+ self.rembert = TFRemBertMainLayer(config, add_pooling_layer=False, name="rembert")
1626
+ self.qa_outputs = keras.layers.Dense(
1627
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1628
+ )
1629
+ self.config = config
1630
+
1631
+ @unpack_inputs
1632
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1633
+ @add_code_sample_docstrings(
1634
+ checkpoint="google/rembert",
1635
+ output_type=TFQuestionAnsweringModelOutput,
1636
+ config_class=_CONFIG_FOR_DOC,
1637
+ )
1638
+ def call(
1639
+ self,
1640
+ input_ids: TFModelInputType | None = None,
1641
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1642
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1643
+ position_ids: np.ndarray | tf.Tensor | None = None,
1644
+ head_mask: np.ndarray | tf.Tensor | None = None,
1645
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1646
+ output_attentions: Optional[bool] = None,
1647
+ output_hidden_states: Optional[bool] = None,
1648
+ return_dict: Optional[bool] = None,
1649
+ start_positions: np.ndarray | tf.Tensor | None = None,
1650
+ end_positions: np.ndarray | tf.Tensor | None = None,
1651
+ training: Optional[bool] = False,
1652
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1653
+ r"""
1654
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1655
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1656
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1657
+ are not taken into account for computing the loss.
1658
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1659
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1660
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1661
+ are not taken into account for computing the loss.
1662
+ """
1663
+ outputs = self.rembert(
1664
+ input_ids=input_ids,
1665
+ attention_mask=attention_mask,
1666
+ token_type_ids=token_type_ids,
1667
+ position_ids=position_ids,
1668
+ head_mask=head_mask,
1669
+ inputs_embeds=inputs_embeds,
1670
+ output_attentions=output_attentions,
1671
+ output_hidden_states=output_hidden_states,
1672
+ return_dict=return_dict,
1673
+ training=training,
1674
+ )
1675
+ sequence_output = outputs[0]
1676
+ logits = self.qa_outputs(inputs=sequence_output)
1677
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1678
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1679
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1680
+ loss = None
1681
+
1682
+ if start_positions is not None and end_positions is not None:
1683
+ labels = {"start_position": start_positions}
1684
+ labels["end_position"] = end_positions
1685
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
1686
+
1687
+ if not return_dict:
1688
+ output = (start_logits, end_logits) + outputs[2:]
1689
+ return ((loss,) + output) if loss is not None else output
1690
+
1691
+ return TFQuestionAnsweringModelOutput(
1692
+ loss=loss,
1693
+ start_logits=start_logits,
1694
+ end_logits=end_logits,
1695
+ hidden_states=outputs.hidden_states,
1696
+ attentions=outputs.attentions,
1697
+ )
1698
+
1699
+ def build(self, input_shape=None):
1700
+ if self.built:
1701
+ return
1702
+ self.built = True
1703
+ if getattr(self, "rembert", None) is not None:
1704
+ with tf.name_scope(self.rembert.name):
1705
+ self.rembert.build(None)
1706
+ if getattr(self, "qa_outputs", None) is not None:
1707
+ with tf.name_scope(self.qa_outputs.name):
1708
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1709
+
1710
+
1711
+ __all__ = [
1712
+ "TFRemBertForCausalLM",
1713
+ "TFRemBertForMaskedLM",
1714
+ "TFRemBertForMultipleChoice",
1715
+ "TFRemBertForQuestionAnswering",
1716
+ "TFRemBertForSequenceClassification",
1717
+ "TFRemBertForTokenClassification",
1718
+ "TFRemBertLayer",
1719
+ "TFRemBertModel",
1720
+ "TFRemBertPreTrainedModel",
1721
+ ]
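A minimal usage sketch of the question-answering head defined above, assuming the `google/rembert` checkpoint named in the code-sample docstring and an installed TensorFlow (the checkpoint is not fine-tuned for QA, so the predicted span is illustrative only):

    import tensorflow as tf
    from transformers import AutoTokenizer, TFRemBertForQuestionAnswering

    tokenizer = AutoTokenizer.from_pretrained("google/rembert")
    model = TFRemBertForQuestionAnswering.from_pretrained("google/rembert")

    question, context = "Where is the Eiffel Tower?", "The Eiffel Tower is in Paris."
    inputs = tokenizer(question, context, return_tensors="tf")
    outputs = model(**inputs)

    # call() splits the qa_outputs projection into per-token start/end logits;
    # the highest-scoring indices delimit the predicted answer span.
    start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
    end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
    answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])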
.venv/lib/python3.11/site-packages/transformers/models/rembert/tokenization_rembert.py ADDED
@@ -0,0 +1,265 @@
1
+ # coding=utf-8
2
+ # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RemBERT."""
16
+
17
+ import os
18
+ from shutil import copyfile
19
+ from typing import List, Optional, Tuple
20
+
21
+ import sentencepiece as spm
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
30
+
31
+
32
+ class RemBertTokenizer(PreTrainedTokenizer):
33
+ """
34
+ Construct a RemBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
35
+
36
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
37
+ this superclass for more information regarding those methods.
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
42
+ contains the vocabulary necessary to instantiate a tokenizer.
43
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
44
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
45
+
46
+ <Tip>
47
+
48
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
49
+ sequence. The token used is the `cls_token`.
50
+
51
+ </Tip>
52
+
53
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
54
+ The end of sequence token.
55
+
56
+ <Tip>
57
+
58
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
59
+ The token used is the `sep_token`.
60
+
61
+ </Tip>
62
+
63
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
64
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
65
+ token instead.
66
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
67
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
68
+ sequence classification or for a text and a question for question answering. It is also used as the last
69
+ token of a sequence built with special tokens.
70
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
71
+ The token used for padding, for example when batching sequences of different lengths.
72
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
73
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
74
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
75
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
76
+ The token used for masking values. This is the token used when training this model with masked language
77
+ modeling. This is the token which the model will try to predict.
78
+
79
+ Attributes:
80
+ sp_model (`SentencePieceProcessor`):
81
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
82
+ """
83
+
84
+ vocab_files_names = VOCAB_FILES_NAMES
85
+
86
+ def __init__(
87
+ self,
88
+ vocab_file,
89
+ do_lower_case=False,
90
+ remove_space=True,
91
+ keep_accents=True,
92
+ bos_token="[CLS]",
93
+ eos_token="[SEP]",
94
+ unk_token="[UNK]",
95
+ sep_token="[SEP]",
96
+ pad_token="[PAD]",
97
+ cls_token="[CLS]",
98
+ mask_token="[MASK]",
99
+ **kwargs,
100
+ ):
101
+ # Mask token behave like a normal word, i.e. include the space before it
102
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
103
+
104
+ self.do_lower_case = do_lower_case
105
+ self.remove_space = remove_space
106
+ self.keep_accents = keep_accents
107
+ self.vocab_file = vocab_file
108
+
109
+ self.sp_model = spm.SentencePieceProcessor()
110
+ self.sp_model.Load(vocab_file)
111
+ super().__init__(
112
+ do_lower_case=do_lower_case,
113
+ remove_space=remove_space,
114
+ keep_accents=keep_accents,
115
+ bos_token=bos_token,
116
+ eos_token=eos_token,
117
+ unk_token=unk_token,
118
+ sep_token=sep_token,
119
+ pad_token=pad_token,
120
+ cls_token=cls_token,
121
+ mask_token=mask_token,
122
+ **kwargs,
123
+ )
124
+
125
+ @property
126
+ def vocab_size(self):
127
+ return len(self.sp_model)
128
+
129
+ def get_vocab(self):
130
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
131
+ vocab.update(self.added_tokens_encoder)
132
+ return vocab
133
+
134
+ def __getstate__(self):
135
+ state = self.__dict__.copy()
136
+ state["sp_model"] = None
137
+ return state
138
+
139
+ def __setstate__(self, d):
140
+ self.__dict__ = d
141
+ self.sp_model = spm.SentencePieceProcessor()
142
+ self.sp_model.Load(self.vocab_file)
143
+
144
+ def _tokenize(self, text, sample=False):
145
+ """Tokenize a string."""
146
+ pieces = self.sp_model.EncodeAsPieces(text)
147
+ return pieces
148
+
149
+ def _convert_token_to_id(self, token):
150
+ """Converts a token (str) in an id using the vocab."""
151
+ return self.sp_model.PieceToId(token)
152
+
153
+ def _convert_id_to_token(self, index):
154
+ """Converts an index (integer) in a token (str) using the vocab."""
155
+ return self.sp_model.IdToPiece(index)
156
+
157
+ def convert_tokens_to_string(self, tokens):
158
+ out_string = self.sp_model.decode_pieces(tokens)
159
+ return out_string
160
+
161
+ def build_inputs_with_special_tokens(
162
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
163
+ ) -> List[int]:
164
+ """
165
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
166
+ adding special tokens. A REMBERT sequence has the following format:
167
+
168
+ - single sequence: `[CLS] X [SEP]`
169
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
170
+
171
+ Args:
172
+ token_ids_0 (`List[int]`):
173
+ List of IDs to which the special tokens will be added.
174
+ token_ids_1 (`List[int]`, *optional*):
175
+ Optional second list of IDs for sequence pairs.
176
+
177
+ Returns:
178
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
179
+ """
180
+ sep = [self.sep_token_id]
181
+ cls = [self.cls_token_id]
182
+ if token_ids_1 is None:
183
+ return cls + token_ids_0 + sep
184
+ return cls + token_ids_0 + sep + token_ids_1 + sep
185
+
186
+ def get_special_tokens_mask(
187
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
188
+ ) -> List[int]:
189
+ """
190
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
191
+ special tokens using the tokenizer `prepare_for_model` method.
192
+
193
+ Args:
194
+ token_ids_0 (`List[int]`):
195
+ List of IDs.
196
+ token_ids_1 (`List[int]`, *optional*):
197
+ Optional second list of IDs for sequence pairs.
198
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
199
+ Whether or not the token list is already formatted with special tokens for the model.
200
+
201
+ Returns:
202
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
203
+ """
204
+
205
+ if already_has_special_tokens:
206
+ if token_ids_1 is not None:
207
+ raise ValueError(
208
+ "You should not supply a second sequence if the provided sequence of "
209
+ "ids is already formatted with special tokens for the model."
210
+ )
211
+ return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
212
+
213
+ if token_ids_1 is not None:
214
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
215
+ return [1] + ([0] * len(token_ids_0)) + [1]
216
+
217
+ def create_token_type_ids_from_sequences(
218
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
219
+ ) -> List[int]:
220
+ """
221
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT
222
+ sequence pair mask has the following format:
223
+
224
+ ```
225
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
226
+ | first sequence | second sequence |
227
+ ```
228
+
229
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
230
+
231
+ Args:
232
+ token_ids_0 (`List[int]`):
233
+ List of IDs.
234
+ token_ids_1 (`List[int]`, *optional*):
235
+ Optional second list of IDs for sequence pairs.
236
+
237
+ Returns:
238
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
239
+ """
240
+ sep = [self.sep_token_id]
241
+ cls = [self.cls_token_id]
242
+
243
+ if token_ids_1 is None:
244
+ return len(cls + token_ids_0 + sep) * [0]
245
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
246
+
247
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
248
+ if not os.path.isdir(save_directory):
249
+ logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
250
+ return
251
+ out_vocab_file = os.path.join(
252
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
253
+ )
254
+
255
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
256
+ copyfile(self.vocab_file, out_vocab_file)
257
+ elif not os.path.isfile(self.vocab_file):
258
+ with open(out_vocab_file, "wb") as fi:
259
+ content_spiece_model = self.sp_model.serialized_model_proto()
260
+ fi.write(content_spiece_model)
261
+
262
+ return (out_vocab_file,)
263
+
264
+
265
+ __all__ = ["RemBertTokenizer"]
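A small sketch of the special-token layout documented above (`[CLS] A [SEP] B [SEP]` with 0/1 token type ids), assuming the `google/rembert` checkpoint and an installed `sentencepiece`:

    from transformers import RemBertTokenizer

    tokenizer = RemBertTokenizer.from_pretrained("google/rembert")

    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("How are you?"))

    # [CLS] A [SEP] B [SEP]
    pair_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
    # 0s cover "[CLS] A [SEP]", 1s cover "B [SEP]"
    type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
    assert len(pair_ids) == len(type_ids) == len(ids_a) + len(ids_b) + 3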
.venv/lib/python3.11/site-packages/transformers/models/rembert/tokenization_rembert_fast.py ADDED
@@ -0,0 +1,232 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RemBERT model."""
16
+
17
+ import os
18
+ from shutil import copyfile
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import AddedToken
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import is_sentencepiece_available, logging
24
+
25
+
26
+ if is_sentencepiece_available():
27
+ from .tokenization_rembert import RemBertTokenizer
28
+ else:
29
+ RemBertTokenizer = None
30
+
31
+ logger = logging.get_logger(__name__)
32
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
33
+
34
+
35
+ SPIECE_UNDERLINE = "▁"
36
+
37
+
38
+ class RemBertTokenizerFast(PreTrainedTokenizerFast):
39
+ """
40
+ Construct a "fast" RemBert tokenizer (backed by HuggingFace's *tokenizers* library). Based on
41
+ [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
42
+ tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
43
+ this superclass for more information regarding those methods.
44
+
45
+ Args:
46
+ vocab_file (`str`):
47
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
48
+ contains the vocabulary necessary to instantiate a tokenizer.
49
+ do_lower_case (`bool`, *optional*, defaults to `True`):
50
+ Whether or not to lowercase the input when tokenizing.
51
+ remove_space (`bool`, *optional*, defaults to `True`):
52
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
53
+ keep_accents (`bool`, *optional*, defaults to `False`):
54
+ Whether or not to keep accents when tokenizing.
55
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
56
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
57
+
58
+ <Tip>
59
+
60
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
61
+ sequence. The token used is the `cls_token`.
62
+
63
+ </Tip>
64
+
65
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
66
+ The end of sequence token. When building a sequence using special tokens, this is not the token
67
+ that is used for the end of sequence. The token used is the `sep_token`.
68
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
69
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
70
+ token instead.
71
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
72
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
73
+ sequence classification or for a text and a question for question answering. It is also used as the last
74
+ token of a sequence built with special tokens.
75
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
76
+ The token used for padding, for example when batching sequences of different lengths.
77
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
78
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
79
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
80
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
81
+ The token used for masking values. This is the token used when training this model with masked language
82
+ modeling. This is the token which the model will try to predict.
83
+ """
84
+
85
+ vocab_files_names = VOCAB_FILES_NAMES
86
+ slow_tokenizer_class = RemBertTokenizer
87
+
88
+ def __init__(
89
+ self,
90
+ vocab_file=None,
91
+ tokenizer_file=None,
92
+ do_lower_case=True,
93
+ remove_space=True,
94
+ keep_accents=False,
95
+ bos_token="[CLS]",
96
+ eos_token="[SEP]",
97
+ unk_token="<unk>",
98
+ sep_token="[SEP]",
99
+ pad_token="<pad>",
100
+ cls_token="[CLS]",
101
+ mask_token="[MASK]",
102
+ **kwargs,
103
+ ):
104
+ # Mask token behave like a normal word, i.e. include the space before it
105
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
106
+
107
+ super().__init__(
108
+ vocab_file,
109
+ tokenizer_file=tokenizer_file,
110
+ do_lower_case=do_lower_case,
111
+ remove_space=remove_space,
112
+ keep_accents=keep_accents,
113
+ bos_token=bos_token,
114
+ eos_token=eos_token,
115
+ unk_token=unk_token,
116
+ sep_token=sep_token,
117
+ pad_token=pad_token,
118
+ cls_token=cls_token,
119
+ mask_token=mask_token,
120
+ **kwargs,
121
+ )
122
+
123
+ self.do_lower_case = do_lower_case
124
+ self.remove_space = remove_space
125
+ self.keep_accents = keep_accents
126
+ self.vocab_file = vocab_file
127
+
128
+ @property
129
+ def can_save_slow_tokenizer(self) -> bool:
130
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
131
+
132
+ def build_inputs_with_special_tokens(
133
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
134
+ ) -> List[int]:
135
+ """
136
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
137
+ adding special tokens. A RemBERT sequence has the following format:
138
+
139
+ - single sequence: `[CLS] X [SEP]`
140
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
141
+
142
+ Args:
143
+ token_ids_0 (`List[int]`):
144
+ List of IDs to which the special tokens will be added
145
+ token_ids_1 (`List[int]`, *optional*, defaults to `None`):
146
+ Optional second list of IDs for sequence pairs.
147
+
148
+ Returns:
149
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
150
+ """
151
+ sep = [self.sep_token_id]
152
+ cls = [self.cls_token_id]
153
+ if token_ids_1 is None:
154
+ return cls + token_ids_0 + sep
155
+ return cls + token_ids_0 + sep + token_ids_1 + sep
156
+
157
+ def get_special_tokens_mask(
158
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
159
+ ) -> List[int]:
160
+ """
161
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
162
+ special tokens using the tokenizer `prepare_for_model` method.
163
+
164
+ Args:
165
+ token_ids_0 (`List[int]`):
166
+ List of ids.
167
+ token_ids_1 (`List[int]`, *optional*, defaults to `None`):
168
+ Optional second list of IDs for sequence pairs.
169
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
170
+ Set to True if the token list is already formatted with special tokens for the model
171
+
172
+ Returns:
173
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
174
+ """
175
+
176
+ if already_has_special_tokens:
177
+ if token_ids_1 is not None:
178
+ raise ValueError(
179
+ "You should not supply a second sequence if the provided sequence of "
180
+ "ids is already formatted with special tokens for the model."
181
+ )
182
+ return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
183
+
184
+ if token_ids_1 is not None:
185
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
186
+ return [1] + ([0] * len(token_ids_0)) + [1]
187
+
188
+ def create_token_type_ids_from_sequences(
189
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
190
+ ) -> List[int]:
191
+ """
192
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT
193
+ sequence pair mask has the following format:
194
+
195
+ ```
196
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
197
+ | first sequence | second sequence |
198
+ ```
199
+
200
+ If `token_ids_1` is `None`, only returns the first portion of the mask (0s).
201
+
202
+ Args:
203
+ token_ids_0 (`List[int]`):
204
+ List of ids.
205
+ token_ids_1 (`List[int]`, *optional*, defaults to `None`):
206
+ Optional second list of IDs for sequence pairs.
207
+
208
+ Returns:
209
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
210
+ """
211
+ sep = [self.sep_token_id]
212
+ cls = [self.cls_token_id]
213
+
214
+ if token_ids_1 is None:
215
+ return len(cls + token_ids_0 + sep) * [0]
216
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
217
+
218
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
219
+ if not os.path.isdir(save_directory):
220
+ logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
221
+ return
222
+ out_vocab_file = os.path.join(
223
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
224
+ )
225
+
226
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
227
+ copyfile(self.vocab_file, out_vocab_file)
228
+
229
+ return (out_vocab_file,)
230
+
231
+
232
+ __all__ = ["RemBertTokenizerFast"]
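The fast tokenizer produces the same layout; a short sketch of `get_special_tokens_mask` for a sequence pair, again assuming the `google/rembert` checkpoint:

    from transformers import RemBertTokenizerFast

    tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")

    ids_a = tokenizer("first sentence", add_special_tokens=False)["input_ids"]
    ids_b = tokenizer("second sentence", add_special_tokens=False)["input_ids"]

    # 1 marks the [CLS] and both [SEP] positions, 0 marks ordinary tokens
    mask = tokenizer.get_special_tokens_mask(ids_a, ids_b)
    assert mask == [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]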
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__init__.py ADDED
@@ -0,0 +1,32 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_wav2vec2 import *
22
+ from .feature_extraction_wav2vec2 import *
23
+ from .modeling_flax_wav2vec2 import *
24
+ from .modeling_tf_wav2vec2 import *
25
+ from .modeling_wav2vec2 import *
26
+ from .processing_wav2vec2 import *
27
+ from .tokenization_wav2vec2 import *
28
+ else:
29
+ import sys
30
+
31
+ _file = globals()["__file__"]
32
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
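With the `_LazyModule` indirection above, importing a single symbol does not eagerly load the TensorFlow or Flax modeling files; a minimal sketch:

    # Only configuration_wav2vec2 is resolved when the attribute is first accessed.
    from transformers.models.wav2vec2 import Wav2Vec2Config

    config = Wav2Vec2Config(hidden_size=256)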
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (991 Bytes). View file
 
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/configuration_wav2vec2.cpython-311.pyc ADDED
Binary file (19.3 kB). View file
 
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/feature_extraction_wav2vec2.cpython-311.pyc ADDED
Binary file (13.4 kB). View file
 
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/modeling_flax_wav2vec2.cpython-311.pyc ADDED
Binary file (71.3 kB). View file
 
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/processing_wav2vec2.cpython-311.pyc ADDED
Binary file (8.6 kB). View file
 
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/__pycache__/tokenization_wav2vec2.cpython-311.pyc ADDED
Binary file (45.4 kB). View file
 
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/configuration_wav2vec2.py ADDED
@@ -0,0 +1,347 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Wav2Vec2 model configuration"""
16
+
17
+ import functools
18
+ import operator
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class Wav2Vec2Config(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`Wav2Vec2Model`]. It is used to instantiate an
30
+ Wav2Vec2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the Wav2Vec2
32
+ [facebook/wav2vec2-base-960h](https://huggingface.co/facebook/wav2vec2-base-960h) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 32):
40
+ Vocabulary size of the Wav2Vec2 model. Defines the number of different tokens that can be represented by
41
+ the `inputs_ids` passed when calling [`Wav2Vec2Model`] or [`TFWav2Vec2Model`].
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ num_hidden_layers (`int`, *optional*, defaults to 12):
47
+ Number of hidden layers in the Transformer encoder.
48
+ num_attention_heads (`int`, *optional*, defaults to 12):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ intermediate_size (`int`, *optional*, defaults to 3072):
51
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
52
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
53
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
54
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
55
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
56
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
57
+ activation_dropout (`float`, *optional*, defaults to 0.1):
58
+ The dropout ratio for activations inside the fully connected layer.
59
+ attention_dropout (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ final_dropout (`float`, *optional*, defaults to 0.1):
62
+ The dropout probability for the final projection layer of [`Wav2Vec2ForCTC`].
63
+ layerdrop (`float`, *optional*, defaults to 0.1):
64
+ The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
65
+ details.
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
69
+ The epsilon used by the layer normalization layers.
70
+ feat_extract_norm (`str`, *optional*, defaults to `"group"`):
71
+ The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
72
+ normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
73
+ convolutional layers.
74
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
75
+ The dropout probability for output of the feature encoder.
76
+ feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
77
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
78
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
79
+ feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
80
+ The dropout probability for quantized feature encoder states.
81
+ conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
82
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
83
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
84
+ conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
85
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
86
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
87
+ conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
88
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
89
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
90
+ *conv_dim*.
91
+ conv_bias (`bool`, *optional*, defaults to `False`):
92
+ Whether the 1D convolutional layers have a bias.
93
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
94
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
95
+ embeddings layer.
96
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
97
+ Number of groups of 1D convolutional positional embeddings layer.
98
+ do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
99
+ Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
100
+ True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
101
+ False` corresponds to applying layer norm after the attention layer.
102
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
103
+ Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
104
+ [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
105
+ Recognition](https://arxiv.org/abs/1904.08779).
106
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
107
+ Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
108
+ procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
109
+ reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
110
+ masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
111
+ actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
112
+ mask_time_length (`int`, *optional*, defaults to 10):
113
+ Length of vector span along the time axis.
114
+ mask_time_min_masks (`int`, *optional*, defaults to 2):
115
+ The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
116
+ irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
117
+ mask_time_min_masks''
118
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
119
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
120
+ masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_feature_length'' independent masks over
121
+ the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
122
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
123
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
124
+ True`.
125
+ mask_feature_length (`int`, *optional*, defaults to 10):
126
+ Length of vector span along the feature axis.
127
+ mask_feature_min_masks (`int`, *optional*, defaults to 0):
128
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
129
+ step, irrespectively of `mask_feature_prob`. Only relevant if
130
+ ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
131
+ num_codevectors_per_group (`int`, *optional*, defaults to 320):
132
+ Number of entries in each quantization codebook (group).
133
+ num_codevector_groups (`int`, *optional*, defaults to 2):
134
+ Number of codevector groups for product codevector quantization.
135
+ contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
136
+ The temperature *kappa* in the contrastive loss.
137
+ feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
138
+ The dropout probability for the output of the feature encoder that's used by the quantizer.
139
+ num_negatives (`int`, *optional*, defaults to 100):
140
+ Number of negative samples for the contrastive loss.
141
+ codevector_dim (`int`, *optional*, defaults to 256):
142
+ Dimensionality of the quantized feature vectors.
143
+ proj_codevector_dim (`int`, *optional*, defaults to 256):
144
+ Dimensionality of the final projection of both the quantized and the transformer features.
145
+ diversity_loss_weight (`float`, *optional*, defaults to 0.1):
146
+ The weight of the codebook diversity loss component.
147
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
148
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
149
+ instance of [`Wav2Vec2ForCTC`].
150
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
151
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
152
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
153
+ of [`Wav2Vec2ForCTC`].
154
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
155
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
156
+ instance of [`Wav2Vec2ForSequenceClassification`].
157
+ classifier_proj_size (`int`, *optional*, defaults to 256):
158
+ Dimensionality of the projection before token mean-pooling for classification.
159
+ tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
160
+ A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
161
+ module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
162
+ tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
163
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
164
+ *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
165
+ tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
166
+ A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
167
+ *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
168
+ xvector_output_dim (`int`, *optional*, defaults to 512):
169
+ Dimensionality of the *XVector* embedding vectors.
170
+ add_adapter (`bool`, *optional*, defaults to `False`):
171
+ Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for
172
+ warm-starting Wav2Vec2 for SpeechEncoderDecoder models.
173
+ adapter_kernel_size (`int`, *optional*, defaults to 3):
174
+ Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
175
+ adapter_stride (`int`, *optional*, defaults to 2):
176
+ Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
177
+ num_adapter_layers (`int`, *optional*, defaults to 3):
178
+ Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
179
+ True`.
180
+ adapter_attn_dim (`int`, *optional*):
181
+ Dimension of the attention adapter weights to be used in each attention block. An example of a model using
182
+ attention adapters is [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all).
183
+ output_hidden_size (`int`, *optional*):
184
+ Dimensionality of the encoder output layer. If not defined, this defaults to *hidden_size*. Only relevant
185
+ if `add_adapter is True`.
186
+
187
+ Example:
188
+
189
+ ```python
190
+ >>> from transformers import Wav2Vec2Config, Wav2Vec2Model
191
+
192
+ >>> # Initializing a Wav2Vec2 facebook/wav2vec2-base-960h style configuration
193
+ >>> configuration = Wav2Vec2Config()
194
+
195
+ >>> # Initializing a model (with random weights) from the facebook/wav2vec2-base-960h style configuration
196
+ >>> model = Wav2Vec2Model(configuration)
197
+
198
+ >>> # Accessing the model configuration
199
+ >>> configuration = model.config
200
+ ```"""
201
+
202
+ model_type = "wav2vec2"
203
+
204
+ def __init__(
205
+ self,
206
+ vocab_size=32,
207
+ hidden_size=768,
208
+ num_hidden_layers=12,
209
+ num_attention_heads=12,
210
+ intermediate_size=3072,
211
+ hidden_act="gelu",
212
+ hidden_dropout=0.1,
213
+ activation_dropout=0.1,
214
+ attention_dropout=0.1,
215
+ feat_proj_dropout=0.0,
216
+ feat_quantizer_dropout=0.0,
217
+ final_dropout=0.1,
218
+ layerdrop=0.1,
219
+ initializer_range=0.02,
220
+ layer_norm_eps=1e-5,
221
+ feat_extract_norm="group",
222
+ feat_extract_activation="gelu",
223
+ conv_dim=(512, 512, 512, 512, 512, 512, 512),
224
+ conv_stride=(5, 2, 2, 2, 2, 2, 2),
225
+ conv_kernel=(10, 3, 3, 3, 3, 2, 2),
226
+ conv_bias=False,
227
+ num_conv_pos_embeddings=128,
228
+ num_conv_pos_embedding_groups=16,
229
+ do_stable_layer_norm=False,
230
+ apply_spec_augment=True,
231
+ mask_time_prob=0.05,
232
+ mask_time_length=10,
233
+ mask_time_min_masks=2,
234
+ mask_feature_prob=0.0,
235
+ mask_feature_length=10,
236
+ mask_feature_min_masks=0,
237
+ num_codevectors_per_group=320,
238
+ num_codevector_groups=2,
239
+ contrastive_logits_temperature=0.1,
240
+ num_negatives=100,
241
+ codevector_dim=256,
242
+ proj_codevector_dim=256,
243
+ diversity_loss_weight=0.1,
244
+ ctc_loss_reduction="sum",
245
+ ctc_zero_infinity=False,
246
+ use_weighted_layer_sum=False,
247
+ classifier_proj_size=256,
248
+ tdnn_dim=(512, 512, 512, 512, 1500),
249
+ tdnn_kernel=(5, 3, 3, 1, 1),
250
+ tdnn_dilation=(1, 2, 3, 1, 1),
251
+ xvector_output_dim=512,
252
+ pad_token_id=0,
253
+ bos_token_id=1,
254
+ eos_token_id=2,
255
+ add_adapter=False,
256
+ adapter_kernel_size=3,
257
+ adapter_stride=2,
258
+ num_adapter_layers=3,
259
+ output_hidden_size=None,
260
+ adapter_attn_dim=None,
261
+ **kwargs,
262
+ ):
263
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
264
+ self.hidden_size = hidden_size
265
+ self.feat_extract_norm = feat_extract_norm
266
+ self.feat_extract_activation = feat_extract_activation
267
+ self.conv_dim = list(conv_dim)
268
+ self.conv_stride = list(conv_stride)
269
+ self.conv_kernel = list(conv_kernel)
270
+ self.conv_bias = conv_bias
271
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
272
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
273
+ self.num_feat_extract_layers = len(self.conv_dim)
274
+ self.num_hidden_layers = num_hidden_layers
275
+ self.intermediate_size = intermediate_size
276
+ self.hidden_act = hidden_act
277
+ self.num_attention_heads = num_attention_heads
278
+ self.hidden_dropout = hidden_dropout
279
+ self.attention_dropout = attention_dropout
280
+ self.activation_dropout = activation_dropout
281
+ self.feat_proj_dropout = feat_proj_dropout
282
+ self.final_dropout = final_dropout
283
+ self.layerdrop = layerdrop
284
+ self.layer_norm_eps = layer_norm_eps
285
+ self.initializer_range = initializer_range
286
+ self.vocab_size = vocab_size
287
+ self.do_stable_layer_norm = do_stable_layer_norm
288
+ self.use_weighted_layer_sum = use_weighted_layer_sum
289
+
290
+ if (
291
+ (len(self.conv_stride) != self.num_feat_extract_layers)
292
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
293
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
294
+ ):
295
+ raise ValueError(
296
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
297
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
298
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
299
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
300
+ )
301
+
302
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
303
+ self.apply_spec_augment = apply_spec_augment
304
+ self.mask_time_prob = mask_time_prob
305
+ self.mask_time_length = mask_time_length
306
+ self.mask_time_min_masks = mask_time_min_masks
307
+ self.mask_feature_prob = mask_feature_prob
308
+ self.mask_feature_length = mask_feature_length
309
+ self.mask_feature_min_masks = mask_feature_min_masks
310
+
311
+ # parameters for pretraining with codevector quantized representations
312
+ self.num_codevectors_per_group = num_codevectors_per_group
313
+ self.num_codevector_groups = num_codevector_groups
314
+ self.contrastive_logits_temperature = contrastive_logits_temperature
315
+ self.feat_quantizer_dropout = feat_quantizer_dropout
316
+ self.num_negatives = num_negatives
317
+ self.codevector_dim = codevector_dim
318
+ self.proj_codevector_dim = proj_codevector_dim
319
+ self.diversity_loss_weight = diversity_loss_weight
320
+
321
+ # ctc loss
322
+ self.ctc_loss_reduction = ctc_loss_reduction
323
+ self.ctc_zero_infinity = ctc_zero_infinity
324
+
325
+ # adapter
326
+ self.add_adapter = add_adapter
327
+ self.adapter_kernel_size = adapter_kernel_size
328
+ self.adapter_stride = adapter_stride
329
+ self.num_adapter_layers = num_adapter_layers
330
+ self.output_hidden_size = output_hidden_size or hidden_size
331
+ self.adapter_attn_dim = adapter_attn_dim
332
+
333
+ # SequenceClassification-specific parameter. Feel free to ignore for other classes.
334
+ self.classifier_proj_size = classifier_proj_size
335
+
336
+ # XVector-specific parameters. Feel free to ignore for other classes.
337
+ self.tdnn_dim = list(tdnn_dim)
338
+ self.tdnn_kernel = list(tdnn_kernel)
339
+ self.tdnn_dilation = list(tdnn_dilation)
340
+ self.xvector_output_dim = xvector_output_dim
341
+
342
+ @property
343
+ def inputs_to_logits_ratio(self):
344
+ return functools.reduce(operator.mul, self.conv_stride, 1)
345
+
346
+
347
+ __all__ = ["Wav2Vec2Config"]
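A quick check of the `inputs_to_logits_ratio` property defined above: it is simply the product of `conv_stride`, i.e. the downsampling factor of the convolutional feature encoder.

    from transformers import Wav2Vec2Config

    config = Wav2Vec2Config()  # default conv_stride = (5, 2, 2, 2, 2, 2, 2)
    assert config.inputs_to_logits_ratio == 5 * 2 * 2 * 2 * 2 * 2 * 2  # 320

    # At 16 kHz input, the encoder therefore emits roughly 50 frames per second.
    print(16000 // config.inputs_to_logits_ratio)  # 50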
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/feature_extraction_wav2vec2.py ADDED
@@ -0,0 +1,243 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for Wav2Vec2
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
24
+ from ...feature_extraction_utils import BatchFeature
25
+ from ...utils import PaddingStrategy, TensorType, logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ class Wav2Vec2FeatureExtractor(SequenceFeatureExtractor):
32
+ r"""
33
+ Constructs a Wav2Vec2 feature extractor.
34
+
35
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
36
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
37
+
38
+ Args:
39
+ feature_size (`int`, *optional*, defaults to 1):
40
+ The feature dimension of the extracted features.
41
+ sampling_rate (`int`, *optional*, defaults to 16000):
42
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
43
+ padding_value (`float`, *optional*, defaults to 0.0):
44
+ The value that is used to fill the padding values.
45
+ do_normalize (`bool`, *optional*, defaults to `True`):
46
+ Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
47
+ improve the performance for some models, *e.g.*,
48
+ [wav2vec2-lv60](https://huggingface.co/models?search=lv60).
49
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
50
+ Whether or not [`~Wav2Vec2FeatureExtractor.__call__`] should return `attention_mask`.
51
+
52
+ <Tip>
53
+
54
+ Wav2Vec2 models that have set `config.feat_extract_norm == "group"`, such as
55
+ [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), have **not** been trained using
56
+ `attention_mask`. For such models, `input_values` should simply be padded with 0 and no `attention_mask`
57
+ should be passed.
58
+
59
+ For Wav2Vec2 models that have set `config.feat_extract_norm == "layer"`, such as
60
+ [wav2vec2-lv60](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), `attention_mask` should be
61
+ passed for batched inference.
62
+
63
+ </Tip>"""
64
+
65
+ model_input_names = ["input_values", "attention_mask"]
66
+
67
+ def __init__(
68
+ self,
69
+ feature_size=1,
70
+ sampling_rate=16000,
71
+ padding_value=0.0,
72
+ return_attention_mask=False,
73
+ do_normalize=True,
74
+ **kwargs,
75
+ ):
76
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
77
+ self.return_attention_mask = return_attention_mask
78
+ self.do_normalize = do_normalize
79
+
80
+ @staticmethod
81
+ def zero_mean_unit_var_norm(
82
+ input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
83
+ ) -> List[np.ndarray]:
84
+ """
85
+ Every array in the list is normalized to have zero mean and unit variance
86
+ """
87
+ if attention_mask is not None:
88
+ attention_mask = np.array(attention_mask, np.int32)
89
+ normed_input_values = []
90
+
91
+ for vector, length in zip(input_values, attention_mask.sum(-1)):
92
+ normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
93
+ if length < normed_slice.shape[0]:
94
+ normed_slice[length:] = padding_value
95
+
96
+ normed_input_values.append(normed_slice)
97
+ else:
98
+ normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
99
+
100
+ return normed_input_values
101
+
102
+ def __call__(
103
+ self,
104
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
105
+ padding: Union[bool, str, PaddingStrategy] = False,
106
+ max_length: Optional[int] = None,
107
+ truncation: bool = False,
108
+ pad_to_multiple_of: Optional[int] = None,
109
+ return_attention_mask: Optional[bool] = None,
110
+ return_tensors: Optional[Union[str, TensorType]] = None,
111
+ sampling_rate: Optional[int] = None,
112
+ **kwargs,
113
+ ) -> BatchFeature:
114
+ """
115
+ Main method to featurize and prepare for the model one or several sequence(s).
116
+
117
+ Args:
118
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
119
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
120
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
121
+ stereo, i.e. single float per timestep.
122
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
123
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
124
+ index) among:
125
+
126
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
127
+ sequence is provided).
128
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
129
+ acceptable input length for the model if that argument is not provided.
130
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
131
+ lengths).
132
+ max_length (`int`, *optional*):
133
+ Maximum length of the returned list and optionally padding length (see above).
134
+ truncation (`bool`):
135
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
136
+ pad_to_multiple_of (`int`, *optional*):
137
+ If set will pad the sequence to a multiple of the provided value.
138
+
139
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
140
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
141
+ return_attention_mask (`bool`, *optional*):
142
+ Whether to return the attention mask. If left to the default, will return the attention mask according
143
+ to the specific feature_extractor's default.
144
+
145
+ [What are attention masks?](../glossary#attention-mask)
146
+
147
+ <Tip>
148
+
149
+ Wav2Vec2 models that have set `config.feat_extract_norm == "group"`, such as
150
+ [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), have **not** been trained using
151
+ `attention_mask`. For such models, `input_values` should simply be padded with 0 and no
152
+ `attention_mask` should be passed.
153
+
154
+ For Wav2Vec2 models that have set `config.feat_extract_norm == "layer"`, such as
155
+ [wav2vec2-lv60](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), `attention_mask` should
156
+ be passed for batched inference.
157
+
158
+ </Tip>
159
+
160
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
161
+ If set, will return tensors instead of list of python integers. Acceptable values are:
162
+
163
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
164
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
165
+ - `'np'`: Return Numpy `np.ndarray` objects.
166
+ sampling_rate (`int`, *optional*):
167
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
168
+ `sampling_rate` at the forward call to prevent silent errors.
169
+ padding_value (`float`, *optional*, defaults to 0.0):
170
+ """
171
+
172
+ if sampling_rate is not None:
173
+ if sampling_rate != self.sampling_rate:
174
+ raise ValueError(
175
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
176
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
177
+ f" {self.sampling_rate} and not {sampling_rate}."
178
+ )
179
+ else:
180
+ logger.warning(
181
+ "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
182
+ "Failing to do so can result in silent errors that might be hard to debug."
183
+ )
184
+
185
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
186
+ if is_batched_numpy and len(raw_speech.shape) > 2:
187
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
188
+ is_batched = is_batched_numpy or (
189
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
190
+ )
191
+
192
+ # always return batch
193
+ if not is_batched:
194
+ raw_speech = [raw_speech]
195
+
196
+ # convert into correct format for padding
197
+ encoded_inputs = BatchFeature({"input_values": raw_speech})
198
+
199
+ padded_inputs = self.pad(
200
+ encoded_inputs,
201
+ padding=padding,
202
+ max_length=max_length,
203
+ truncation=truncation,
204
+ pad_to_multiple_of=pad_to_multiple_of,
205
+ return_attention_mask=return_attention_mask,
206
+ )
207
+
208
+ # convert input values to correct format
209
+ input_values = padded_inputs["input_values"]
210
+ if not isinstance(input_values[0], np.ndarray):
211
+ padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
212
+ elif (
213
+ not isinstance(input_values, np.ndarray)
214
+ and isinstance(input_values[0], np.ndarray)
215
+ and input_values[0].dtype is np.dtype(np.float64)
216
+ ):
217
+ padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
218
+ elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
219
+ padded_inputs["input_values"] = input_values.astype(np.float32)
220
+
221
+ # convert attention_mask to correct format
222
+ attention_mask = padded_inputs.get("attention_mask")
223
+ if attention_mask is not None:
224
+ padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
225
+
226
+ # zero-mean and unit-variance normalization
227
+ if self.do_normalize:
228
+ attention_mask = (
229
+ attention_mask
230
+ if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
231
+ else None
232
+ )
233
+ padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
234
+ padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
235
+ )
236
+
237
+ if return_tensors is not None:
238
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
239
+
240
+ return padded_inputs
241
+
242
+
243
+ __all__ = ["Wav2Vec2FeatureExtractor"]
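A short sketch of batched padding and normalization with the feature extractor above; the random arrays are stand-ins for real 16 kHz mono audio, and `sampling_rate` is passed explicitly to avoid the warning in `__call__`:

    import numpy as np
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(do_normalize=True, return_attention_mask=True)
    speech = [
        np.random.randn(16000).astype(np.float32),  # 1.0 s of audio
        np.random.randn(8000).astype(np.float32),   # 0.5 s of audio
    ]

    batch = extractor(speech, sampling_rate=16000, padding=True, return_tensors="np")
    print(batch["input_values"].shape)      # (2, 16000) after zero-padding
    print(batch["attention_mask"].sum(-1))  # [16000 8000]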
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_flax_wav2vec2.py ADDED
@@ -0,0 +1,1428 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Flax Wav2Vec2 model."""
16
+
17
+ from functools import partial
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import flax
21
+ import flax.linen as nn
22
+ import jax
23
+ import jax.numpy as jnp
24
+ import numpy as np
25
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
26
+ from flax.linen.attention import dot_product_attention_weights
27
+ from flax.traverse_util import flatten_dict, unflatten_dict
28
+ from jax import lax
29
+
30
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
31
+ from ...modeling_flax_utils import (
32
+ ACT2FN,
33
+ FlaxPreTrainedModel,
34
+ append_replace_return_docstrings,
35
+ overwrite_call_docstring,
36
+ )
37
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
38
+ from .configuration_wav2vec2 import Wav2Vec2Config
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ @flax.struct.dataclass
45
+ class FlaxWav2Vec2BaseModelOutput(ModelOutput):
46
+ """
47
+ Output type of [`FlaxWav2Vec2BaseModelOutput`], with potential hidden states and attentions.
48
+
49
+ Args:
50
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
51
+ Sequence of hidden-states at the output of the last layer of the model.
52
+ extract_features (`jnp.ndarray` of shape `(batch_size, sequence_length, last_conv_dim)`):
53
+ Sequence of extracted feature vectors of the last convolutional layer of the model with `last_conv_dim`
54
+ being the dimension of the last convolutional layer.
55
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
56
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
57
+ `(batch_size, sequence_length, hidden_size)`.
58
+
59
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
60
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
61
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
62
+ sequence_length)`.
63
+
64
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
65
+ heads.
66
+ """
67
+
68
+ last_hidden_state: jnp.ndarray = None
69
+ extract_features: jnp.ndarray = None
70
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
71
+ attentions: Optional[Tuple[jnp.ndarray]] = None
72
+
73
+
74
+ @flax.struct.dataclass
75
+ class FlaxWav2Vec2ForPreTrainingOutput(ModelOutput):
76
+ """
77
+ Output type of [`FlaxWav2Vec2ForPreTrainingOutput`], with potential hidden states and attentions.
78
+
79
+ Args:
80
+ loss (*optional*, returned when model is in train mode, `jnp.ndarray` of shape `(1,)`):
81
+ Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
82
+ paper](https://arxiv.org/pdf/2006.11477.pdf).
83
+ projected_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
84
+ Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
85
+ projected quantized states.
86
+ projected_quantized_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
87
+ Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
88
+ target vectors for contrastive loss.
89
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
90
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
91
+ `(batch_size, sequence_length, hidden_size)`.
92
+
93
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
94
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
95
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
96
+ sequence_length)`.
97
+
98
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
99
+ heads.
100
+ """
101
+
102
+ projected_states: jnp.ndarray = None
103
+ projected_quantized_states: jnp.ndarray = None
104
+ codevector_perplexity: jnp.ndarray = None
105
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
106
+ attentions: Optional[Tuple[jnp.ndarray]] = None
107
+
108
+
109
+ def _compute_mask_indices(
110
+ shape: Tuple[int, int],
111
+ mask_prob: float,
112
+ mask_length: int,
113
+ attention_mask: Optional[np.ndarray] = None,
114
+ min_masks: int = 0,
115
+ ) -> np.ndarray:
116
+ """
117
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
118
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
119
+ CPU as part of the preprocessing during training.
120
+
121
+ Args:
122
+ shape: the shape for which to compute masks.
123
+ should be of size 2 where first element is batch size and 2nd is timesteps
124
+ mask_prob:
125
+ probability for each token to be chosen as start of the span to be masked. this will be multiplied by
126
+ number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
127
+ However, due to overlaps, the actual number of masked elements will be smaller.
128
+ mask_length: size of the mask
129
+ min_masks: minimum number of masked spans
130
+
131
+ """
132
+ batch_size, sequence_length = shape
133
+
134
+ if mask_length < 1:
135
+ raise ValueError("`mask_length` has to be bigger than 0.")
136
+
137
+ if mask_length > sequence_length:
138
+ raise ValueError(
139
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and"
140
+ f" `sequence_length`: {sequence_length}"
141
+ )
142
+
143
+ # compute number of masked spans in batch
144
+ num_masked_spans = int(mask_prob * sequence_length / mask_length + np.random.rand(1).item())
145
+ num_masked_spans = max(num_masked_spans, min_masks)
146
+
147
+ # make sure num masked indices <= sequence_length
148
+ if num_masked_spans * mask_length > sequence_length:
149
+ num_masked_spans = sequence_length // mask_length
150
+
151
+ # SpecAugment mask to fill
152
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
153
+
154
+ # get random indices to mask
155
+ spec_aug_mask_idxs = np.array(
156
+ [
157
+ np.random.choice(np.arange(sequence_length - (mask_length - 1)), num_masked_spans, replace=False)
158
+ for _ in range(batch_size)
159
+ ]
160
+ )
161
+
162
+ # expand masked indices to masked spans
163
+ spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, num_masked_spans, mask_length))
164
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, num_masked_spans * mask_length)
165
+
166
+ offsets = np.arange(mask_length)[None, None, :]
167
+ offsets = np.broadcast_to(offsets, (batch_size, num_masked_spans, mask_length)).reshape(
168
+ batch_size, num_masked_spans * mask_length
169
+ )
170
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
171
+
172
+ # scatter indices to mask
173
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
174
+
175
+ if attention_mask is not None:
176
+ # make sure padded input ids cannot be masked
177
+ spec_aug_mask = np.where(attention_mask, spec_aug_mask, False)
178
+
179
+ return spec_aug_mask
180
+
181
+
182
+ def _sample_negative_indices(features_shape: Tuple, num_negatives: int, attention_mask: Optional[np.ndarray] = None):
183
+ """
184
+ Sample `num_negatives` vectors from feature vectors.
185
+ """
186
+ batch_size, sequence_length, hidden_size = features_shape
187
+ if sequence_length <= 1:
188
+ raise ValueError(
189
+ "`features` should have `sequence_length` > 1, but are of shape "
190
+ f"(batch_size, sequence_length, hidden_size) = ({batch_size, sequence_length, hidden_size})."
191
+ )
192
+
193
+ # get `num_negatives` random vector indices from the same utterance
194
+ sampled_negative_indices = []
195
+ for batch_idx in range(batch_size):
196
+ high = attention_mask[batch_idx].sum() - 1 if attention_mask is not None else sequence_length - 1
197
+ sampled_indices_slice = np.random.randint(0, high, size=(num_negatives * sequence_length,))
198
+ sampled_negative_indices.append(sampled_indices_slice)
199
+
200
+ sampled_negative_indices = np.asarray(sampled_negative_indices, dtype=np.int32)
201
+
202
+ # generate indices of the positive vectors themselves, repeat them `num_negatives` times
203
+ feature_indices = np.broadcast_to(np.arange(sequence_length)[:, None], (sequence_length, num_negatives)).flatten()
204
+
205
+ # avoid sampling the same positive vector, but keep the distribution uniform
206
+ sampled_negative_indices[sampled_negative_indices >= feature_indices] += 1
207
+
208
+ # correct for batch size
209
+ for batch_idx in range(1, batch_size):
210
+ sampled_negative_indices[batch_idx] += batch_idx * sequence_length
211
+
212
+ return sampled_negative_indices
213
+
214
+
215
+ WAV_2_VEC_2_START_DOCSTRING = r"""
216
+ Wav2Vec2 was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech
217
+ Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael
218
+ Auli.
219
+
220
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
221
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
222
+ etc.)
223
+
224
+ This model is also a Flax Linen
225
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
226
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
227
+
228
+ Finally, this model supports inherent JAX features such as:
229
+
230
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
231
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
232
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
233
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
234
+
235
+ Parameters:
236
+ config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model.
237
+ Initializing with a config file does not load the weights associated with the model, only the
238
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
239
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
240
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
241
+ `jax.numpy.bfloat16` (on TPUs).
242
+
243
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
244
+ specified all the computation will be performed with the given `dtype`.
245
+
246
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
247
+ parameters.**
248
+
249
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
250
+ [`~FlaxPreTrainedModel.to_bf16`].
251
+ """
252
+
253
+
254
+ WAV_2_VEC_2_INPUTS_DOCSTRING = r"""
255
+ Args:
256
+ input_values (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
257
+ Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
258
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
259
+ soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
260
+ conversion into a tensor of type `jnp.ndarray`. See [`Wav2Vec2Processor.__call__`] for details.
261
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
262
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
263
+ 1]`:
264
+
265
+ - 1 for tokens that are **not masked**,
266
+ - 0 for tokens that are **masked**.
267
+
268
+ [What are attention masks?](../glossary#attention-mask) .. warning:: `attention_mask` should only be passed
269
+ if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor
270
+ has `config.return_attention_mask == False`, such as
271
+ [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), `attention_mask` should **not** be
272
+ passed to avoid degraded performance when doing batched inference. For such models `input_values` should
273
+ simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly
274
+ different results depending on whether `input_values` is padded or not.
275
+ mask_time_indices (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
276
+ Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
277
+ masked extracted features in *config.proj_codevector_dim* space.
278
+ output_attentions (`bool`, *optional*):
279
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
280
+ tensors for more detail.
281
+ output_hidden_states (`bool`, *optional*):
282
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
283
+ more detail.
284
+ return_dict (`bool`, *optional*):
285
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
286
+ """
287
+
288
+
289
+ class FlaxWav2Vec2LayerNormConvLayer(nn.Module):
290
+ config: Wav2Vec2Config
291
+ layer_id: int = 0
292
+ dtype: jnp.dtype = jnp.float32
293
+
294
+ def setup(self):
295
+ self.in_conv_dim = self.config.conv_dim[self.layer_id] if self.layer_id > 0 else 1
296
+ self.out_conv_dim = self.config.conv_dim[self.layer_id]
297
+
298
+ self.conv = nn.Conv(
299
+ features=self.config.conv_dim[self.layer_id],
300
+ kernel_size=(self.config.conv_kernel[self.layer_id],),
301
+ strides=(self.config.conv_stride[self.layer_id],),
302
+ use_bias=self.config.conv_bias,
303
+ kernel_init=jax.nn.initializers.he_normal(),
304
+ padding="VALID",
305
+ dtype=self.dtype,
306
+ )
307
+ self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
308
+ self.activation = ACT2FN[self.config.feat_extract_activation]
309
+
310
+ def __call__(self, hidden_states):
311
+ hidden_states = self.conv(hidden_states)
312
+ hidden_states = self.layer_norm(hidden_states)
313
+ hidden_states = self.activation(hidden_states)
314
+ return hidden_states
315
+
316
+
317
+ class FlaxConvWithWeightNorm(nn.Module):
318
+ config: Wav2Vec2Config
319
+ dtype: jnp.dtype = jnp.float32
320
+
321
+ def setup(self):
322
+ self.conv = nn.Conv(
323
+ features=self.config.hidden_size,
324
+ kernel_size=(self.config.num_conv_pos_embeddings,),
325
+ kernel_init=jax.nn.initializers.he_normal(),
326
+ padding="VALID",
327
+ feature_group_count=self.config.num_conv_pos_embedding_groups,
328
+ dtype=self.dtype,
329
+ )
330
+ weight_shape = (
331
+ self.conv.features,
332
+ self.conv.features // self.conv.feature_group_count,
333
+ self.conv.kernel_size[0],
334
+ )
335
+ self.weight_v = self.param("weight_v", jax.nn.initializers.he_normal(), weight_shape)
336
+ self.weight_g = self.param("weight_g", lambda _: jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :])
337
+ self.bias = self.param("bias", jax.nn.initializers.zeros, (self.conv.features,))
338
+ self.prev_padding = self.conv.kernel_size[0] // 2
339
+
340
+ def _get_normed_weights(self):
341
+ weight_v_norm = jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :]
342
+ normed_weight_v = jnp.divide(self.weight_v, weight_v_norm)
343
+ normed_kernel = jnp.multiply(normed_weight_v, self.weight_g)
344
+ return normed_kernel
345
+
346
+ def __call__(self, hidden_states):
347
+ kernel = self._get_normed_weights()
348
+ hidden_states = jnp.pad(hidden_states, ((0, 0), (self.prev_padding, self.prev_padding), (0, 0)))
349
+ hidden_states = self.conv.apply({"params": {"kernel": kernel.T, "bias": self.bias}}, hidden_states)
350
+ return hidden_states
351
+
352
+
353
+ class FlaxWav2Vec2PositionalConvEmbedding(nn.Module):
354
+ config: Wav2Vec2Config
355
+ dtype: jnp.dtype = jnp.float32
356
+
357
+ def setup(self):
358
+ self.conv = FlaxConvWithWeightNorm(self.config, dtype=self.dtype)
359
+ self.activation = ACT2FN[self.config.feat_extract_activation]
360
+ self.num_pad_remove = 1 if self.config.num_conv_pos_embeddings % 2 == 0 else 0
361
+
362
+ def __call__(self, hidden_states):
363
+ hidden_states = hidden_states.transpose((0, 1, 2))
364
+
365
+ hidden_states = self.conv(hidden_states)
366
+
367
+ if self.num_pad_remove > 0:
368
+ hidden_states = hidden_states[:, : -self.num_pad_remove, :]
369
+ hidden_states = self.activation(hidden_states)
370
+
371
+ hidden_states = hidden_states.transpose((0, 1, 2))
372
+ return hidden_states
373
+
374
+
375
+ class FlaxConvLayersCollection(nn.Module):
376
+ config: Wav2Vec2Config
377
+ dtype: jnp.dtype = jnp.float32
378
+
379
+ def setup(self):
380
+ if self.config.feat_extract_norm == "layer":
381
+ self.layers = [
382
+ FlaxWav2Vec2LayerNormConvLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype)
383
+ for i in range(self.config.num_feat_extract_layers)
384
+ ]
385
+ elif self.config.feat_extract_norm == "group":
386
+ raise NotImplementedError("At the moment only ``config.feat_extract_norm == 'layer'`` is supported")
387
+ else:
388
+ raise ValueError(
389
+ f"`config.feat_extract_norm` is {self.config.feat_extract_norm}, but has to be one of ['group',"
390
+ " 'layer']"
391
+ )
392
+
393
+ def __call__(self, hidden_states):
394
+ for i, conv_layer in enumerate(self.layers):
395
+ hidden_states = conv_layer(hidden_states)
396
+ return hidden_states
397
+
398
+
399
+ class FlaxWav2Vec2FeatureEncoder(nn.Module):
400
+ """Construct the features from raw audio waveform"""
401
+
402
+ config: Wav2Vec2Config
403
+ dtype: jnp.dtype = jnp.float32
404
+
405
+ def setup(self):
406
+ self.conv_layers = FlaxConvLayersCollection(self.config, dtype=self.dtype)
407
+
408
+ def __call__(self, input_values, freeze_feature_encoder=False):
409
+ hidden_states = input_values[:, :, None]
410
+ hidden_states = self.conv_layers(hidden_states)
411
+ if freeze_feature_encoder:
412
+ hidden_states = jax.lax.stop_gradient(hidden_states)
413
+ return hidden_states
414
+
415
+
416
+ class FlaxWav2Vec2FeatureProjection(nn.Module):
417
+ config: Wav2Vec2Config
418
+ dtype: jnp.dtype = jnp.float32
419
+
420
+ def setup(self):
421
+ self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
422
+ self.projection = nn.Dense(
423
+ self.config.hidden_size,
424
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
425
+ dtype=self.dtype,
426
+ )
427
+ self.dropout = nn.Dropout(rate=self.config.feat_proj_dropout)
428
+
429
+ def __call__(self, hidden_states, deterministic=True):
430
+ norm_hidden_states = self.layer_norm(hidden_states)
431
+ hidden_states = self.projection(norm_hidden_states)
432
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
433
+ return hidden_states, norm_hidden_states
434
+
435
+
436
+ class FlaxWav2Vec2Attention(nn.Module):
437
+ config: Wav2Vec2Config
438
+ embed_dim: int
439
+ num_heads: int
440
+ dropout: float = 0.0
441
+ bias: bool = True
442
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
443
+
444
+ def setup(self) -> None:
445
+ self.head_dim = self.embed_dim // self.num_heads
446
+ if self.head_dim * self.num_heads != self.embed_dim:
447
+ raise ValueError(
448
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
449
+ f" {self.num_heads})."
450
+ )
451
+
452
+ dense = partial(
453
+ nn.Dense,
454
+ self.embed_dim,
455
+ use_bias=self.bias,
456
+ dtype=self.dtype,
457
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
458
+ )
459
+
460
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
461
+ self.out_proj = dense()
462
+
463
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
464
+
465
+ def _split_heads(self, hidden_states):
466
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
467
+
468
+ def _merge_heads(self, hidden_states):
469
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
470
+
471
+ def __call__(
472
+ self,
473
+ hidden_states: jnp.ndarray,
474
+ key_value_states: Optional[jnp.ndarray] = None,
475
+ attention_mask: Optional[jnp.ndarray] = None,
476
+ deterministic: bool = True,
477
+ ) -> Tuple[jnp.ndarray]:
478
+ """Input shape: Batch x Time x Channel"""
479
+
480
+ # get query proj
481
+ query_states = self.q_proj(hidden_states)
482
+
483
+ key_states = self.k_proj(hidden_states)
484
+ value_states = self.v_proj(hidden_states)
485
+
486
+ query_states = self._split_heads(query_states)
487
+ key_states = self._split_heads(key_states)
488
+ value_states = self._split_heads(value_states)
489
+
490
+ if attention_mask is not None:
491
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
492
+
493
+ # Convert the boolean attention mask to an attention bias.
494
+ if attention_mask is not None:
495
+ # attention mask in the form of attention bias
496
+ attention_bias = lax.select(
497
+ attention_mask > 0,
498
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
499
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
500
+ )
501
+ else:
502
+ attention_bias = None
503
+
504
+ dropout_rng = None
505
+ if not deterministic and self.dropout > 0.0:
506
+ dropout_rng = self.make_rng("dropout")
507
+
508
+ attn_weights = dot_product_attention_weights(
509
+ query_states,
510
+ key_states,
511
+ bias=attention_bias,
512
+ dropout_rng=dropout_rng,
513
+ dropout_rate=self.dropout,
514
+ broadcast_dropout=True,
515
+ deterministic=deterministic,
516
+ dtype=self.dtype,
517
+ precision=None,
518
+ )
519
+
520
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
521
+ attn_output = self._merge_heads(attn_output)
522
+ attn_output = self.out_proj(attn_output)
523
+
524
+ return attn_output, attn_weights
525
+
526
+
527
+ class FlaxWav2Vec2FeedForward(nn.Module):
528
+ config: Wav2Vec2Config
529
+ dtype: jnp.dtype = jnp.float32
530
+
531
+ def setup(self):
532
+ self.intermediate_dropout = nn.Dropout(rate=self.config.activation_dropout)
533
+
534
+ self.intermediate_dense = nn.Dense(
535
+ self.config.intermediate_size,
536
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
537
+ dtype=self.dtype,
538
+ )
539
+ if isinstance(self.config.hidden_act, str):
540
+ self.intermediate_act_fn = ACT2FN[self.config.hidden_act]
541
+ else:
542
+ self.intermediate_act_fn = self.config.hidden_act
543
+
544
+ self.output_dense = nn.Dense(
545
+ self.config.hidden_size,
546
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
547
+ dtype=self.dtype,
548
+ )
549
+ self.output_dropout = nn.Dropout(rate=self.config.hidden_dropout)
550
+
551
+ def __call__(self, hidden_states, deterministic=True):
552
+ hidden_states = self.intermediate_dense(hidden_states)
553
+ hidden_states = self.intermediate_act_fn(hidden_states)
554
+ hidden_states = self.intermediate_dropout(hidden_states, deterministic=deterministic)
555
+
556
+ hidden_states = self.output_dense(hidden_states)
557
+ hidden_states = self.output_dropout(hidden_states, deterministic=deterministic)
558
+ return hidden_states
559
+
560
+
561
+ class FlaxWav2Vec2EncoderLayerStableLayerNorm(nn.Module):
562
+ config: Wav2Vec2Config
563
+ dtype: jnp.dtype = jnp.float32
564
+
565
+ def setup(self):
566
+ self.attention = FlaxWav2Vec2Attention(
567
+ config=self.config,
568
+ embed_dim=self.config.hidden_size,
569
+ num_heads=self.config.num_attention_heads,
570
+ dropout=self.config.attention_dropout,
571
+ dtype=self.dtype,
572
+ )
573
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout)
574
+ self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
575
+ self.feed_forward = FlaxWav2Vec2FeedForward(self.config, dtype=self.dtype)
576
+ self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
577
+
578
+ def __call__(self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False):
579
+ attn_residual = hidden_states
580
+ hidden_states = self.layer_norm(hidden_states)
581
+ hidden_states, attn_weights = self.attention(
582
+ hidden_states, attention_mask=attention_mask, deterministic=deterministic
583
+ )
584
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
585
+ hidden_states = attn_residual + hidden_states
586
+ hidden_states = hidden_states + self.feed_forward(
587
+ self.final_layer_norm(hidden_states), deterministic=deterministic
588
+ )
589
+
590
+ outputs = (hidden_states,)
591
+
592
+ if output_attentions:
593
+ outputs += (attn_weights,)
594
+
595
+ return outputs
596
+
597
+
598
+ class FlaxWav2Vec2EncoderLayerStableLayerNormCollection(nn.Module):
599
+ config: Wav2Vec2Config
600
+ dtype: jnp.dtype = jnp.float32
601
+
602
+ def setup(self):
603
+ self.layers = [
604
+ FlaxWav2Vec2EncoderLayerStableLayerNorm(self.config, name=str(i), dtype=self.dtype)
605
+ for i in range(self.config.num_hidden_layers)
606
+ ]
607
+
608
+ def __call__(
609
+ self,
610
+ hidden_states,
611
+ attention_mask=None,
612
+ deterministic: bool = True,
613
+ output_attentions: bool = False,
614
+ output_hidden_states: bool = False,
615
+ return_dict: bool = True,
616
+ ):
617
+ all_attentions = () if output_attentions else None
618
+ all_hidden_states = () if output_hidden_states else None
619
+
620
+ for i, layer in enumerate(self.layers):
621
+ if output_hidden_states:
622
+ all_hidden_states += (hidden_states,)
623
+
624
+ layer_outputs = layer(
625
+ hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions
626
+ )
627
+
628
+ hidden_states = layer_outputs[0]
629
+
630
+ if output_attentions:
631
+ all_attentions += (layer_outputs[1],)
632
+
633
+ if output_hidden_states:
634
+ all_hidden_states += (hidden_states,)
635
+
636
+ outputs = (hidden_states, all_hidden_states, all_attentions)
637
+
638
+ if not return_dict:
639
+ return tuple(v for v in outputs if v is not None)
640
+
641
+ return FlaxBaseModelOutput(
642
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
643
+ )
644
+
645
+
646
+ class FlaxWav2Vec2StableLayerNormEncoder(nn.Module):
647
+ config: Wav2Vec2Config
648
+ dtype: jnp.dtype = jnp.float32
649
+
650
+ def setup(self):
651
+ self.pos_conv_embed = FlaxWav2Vec2PositionalConvEmbedding(self.config, dtype=self.dtype)
652
+ self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
653
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout)
654
+ self.layers = FlaxWav2Vec2EncoderLayerStableLayerNormCollection(self.config, dtype=self.dtype)
655
+
656
+ def __call__(
657
+ self,
658
+ hidden_states,
659
+ attention_mask=None,
660
+ deterministic=True,
661
+ output_attentions=False,
662
+ output_hidden_states=False,
663
+ return_dict=True,
664
+ ):
665
+ if attention_mask is not None:
666
+ # make sure padded tokens are not attended to
667
+ hidden_states = jnp.where(
668
+ jnp.broadcast_to(attention_mask[:, :, None], hidden_states.shape), hidden_states, 0
669
+ )
670
+
671
+ position_embeddings = self.pos_conv_embed(hidden_states)
672
+
673
+ hidden_states = hidden_states + position_embeddings
674
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
675
+
676
+ outputs = self.layers(
677
+ hidden_states,
678
+ attention_mask,
679
+ output_attentions=output_attentions,
680
+ output_hidden_states=output_hidden_states,
681
+ return_dict=return_dict,
682
+ )
683
+
684
+ last_hidden_state = self.layer_norm(outputs[0])
685
+
686
+ # update the last element in `hidden_states` after applying `layernorm` above
687
+ hidden_states = None
688
+ if output_hidden_states:
689
+ hidden_states = outputs[1]
690
+ hidden_states = hidden_states[:-1] + (last_hidden_state,)
691
+
692
+ if not return_dict:
693
+ outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
694
+ return tuple(v for v in outputs if v is not None)
695
+
696
+ return FlaxBaseModelOutput(
697
+ last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=outputs.attentions
698
+ )
699
+
700
+
701
+ class FlaxWav2Vec2GumbelVectorQuantizer(nn.Module):
702
+ """
703
+ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
704
+ GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
705
+ """
706
+
707
+ config: Wav2Vec2Config
708
+ dtype: jnp.dtype = jnp.float32
709
+
710
+ def setup(self):
711
+ self.num_groups = self.config.num_codevector_groups
712
+ self.num_vars = self.config.num_codevectors_per_group
713
+
714
+ if self.config.codevector_dim % self.num_groups != 0:
715
+ raise ValueError(
716
+ f"`config.codevector_dim` {self.config.codevector_dim} must be divisible by"
717
+ f" `config.num_codevector_groups` {self.num_groups} for concatenation"
718
+ )
719
+
720
+ # storage for codebook variables (codewords)
721
+ self.codevectors = self.param(
722
+ "codevectors",
723
+ jax.nn.initializers.uniform(),
724
+ (1, self.num_groups * self.num_vars, self.config.codevector_dim // self.num_groups),
725
+ )
726
+ self.weight_proj = nn.Dense(
727
+ self.num_groups * self.num_vars,
728
+ kernel_init=jax.nn.initializers.normal(1.0),
729
+ dtype=self.dtype,
730
+ )
731
+
732
+ @staticmethod
733
+ def _compute_perplexity(probs, mask=None):
734
+ if mask is not None:
735
+ mask_extended = jnp.broadcast_to(mask.flatten()[:, None, None], probs.shape)
736
+ probs = jnp.where(mask_extended, probs, jnp.zeros_like(probs))
737
+ marginal_probs = probs.sum(axis=0) / mask.sum()
738
+ else:
739
+ marginal_probs = probs.mean(axis=0)
740
+
741
+ perplexity = jnp.exp(-jnp.sum(marginal_probs * jnp.log(marginal_probs + 1e-7), axis=-1)).sum()
742
+ return perplexity
743
+
744
+ def __call__(self, hidden_states, mask_time_indices=None, deterministic=True, temperature=1):
745
+ batch_size, sequence_length, hidden_size = hidden_states.shape
746
+
747
+ # project to codevector dim
748
+ hidden_states = self.weight_proj(hidden_states)
749
+ hidden_states = hidden_states.reshape(batch_size * sequence_length * self.num_groups, -1)
750
+
751
+ if not deterministic:
752
+ # sample code vector probs via gumbel in differentiable way
753
+ gumbel_rng = self.make_rng("gumbel")
754
+ gumbels = jax.random.gumbel(gumbel_rng, hidden_states.shape)
755
+ codevector_probs = nn.softmax((hidden_states + gumbels) / temperature)
756
+
757
+ # compute perplexity
758
+ codevector_soft_dist = nn.softmax(
759
+ hidden_states.reshape(batch_size * sequence_length, self.num_groups, -1), axis=-1
760
+ )
761
+ perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices)
762
+ else:
763
+ # take argmax in non-differentiable way
764
+ # compute hard codevector distribution (one hot)
765
+ codevector_idx = hidden_states.argmax(axis=-1)
766
+ codevector_probs = jax.nn.one_hot(codevector_idx, hidden_states.shape[-1]) * 1.0
767
+ codevector_probs = codevector_probs.reshape(batch_size * sequence_length, self.num_groups, -1)
768
+ perplexity = self._compute_perplexity(codevector_probs, mask_time_indices)
769
+
770
+ codevector_probs = codevector_probs.reshape(batch_size * sequence_length, -1)
771
+ # use probs to retrieve codevectors
772
+ codevectors_per_group = jnp.expand_dims(codevector_probs, axis=-1) * self.codevectors
773
+ codevectors = codevectors_per_group.reshape(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
774
+ codevectors = codevectors.sum(-2).reshape(batch_size, sequence_length, -1)
775
+
776
+ return codevectors, perplexity
777
+
778
+
779
+ class FlaxWav2Vec2Adapter(nn.Module):
780
+ config: Wav2Vec2Config
781
+ dtype: jnp.dtype = jnp.float32
782
+
783
+ def setup(self):
784
+ # hidden_states require down-projection if feature dims don't match
785
+ if self.config.output_hidden_size != self.config.hidden_size:
786
+ self.proj = nn.Dense(
787
+ self.config.output_hidden_size,
788
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
789
+ dtype=self.dtype,
790
+ )
791
+ self.proj_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
792
+ else:
793
+ self.proj = self.proj_layer_norm = None
794
+
795
+ self.layers = FlaxWav2Vec2AdapterLayersCollection(self.config, dtype=self.dtype)
796
+
797
+ def __call__(self, hidden_states, deterministic=True):
798
+ # down-project hidden_states if required
799
+ if self.proj is not None and self.proj_layer_norm is not None:
800
+ hidden_states = self.proj(hidden_states)
801
+ hidden_states = self.proj_layer_norm(hidden_states)
802
+
803
+ hidden_states = self.layers(hidden_states)
804
+
805
+ return hidden_states
806
+
807
+
808
+ class FlaxWav2Vec2AdapterLayer(nn.Module):
809
+ config: Wav2Vec2Config
810
+ dtype: jnp.dtype = jnp.float32
811
+
812
+ def setup(self):
813
+ self.conv = nn.Conv(
814
+ features=2 * self.config.output_hidden_size,
815
+ kernel_size=(self.config.adapter_kernel_size,),
816
+ strides=(self.config.adapter_stride,),
817
+ padding=((1, 1),),
818
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
819
+ dtype=self.dtype,
820
+ )
821
+
822
+ def __call__(self, hidden_states):
823
+ hidden_states = self.conv(hidden_states)
824
+ hidden_states = nn.glu(hidden_states, axis=2)
825
+
826
+ return hidden_states
827
+
828
+
829
+ class FlaxWav2Vec2AdapterLayersCollection(nn.Module):
830
+ config: Wav2Vec2Config
831
+ dtype: jnp.dtype = jnp.float32
832
+
833
+ def setup(self):
834
+ self.layers = [
835
+ FlaxWav2Vec2AdapterLayer(self.config, name=str(i), dtype=self.dtype)
836
+ for i in range(self.config.num_adapter_layers)
837
+ ]
838
+
839
+ def __call__(self, hidden_states):
840
+ for conv_layer in self.layers:
841
+ hidden_states = conv_layer(hidden_states)
842
+
843
+ return hidden_states
844
+
845
+
846
+ class FlaxWav2Vec2PreTrainedModel(FlaxPreTrainedModel):
847
+ """
848
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
849
+ models.
850
+ """
851
+
852
+ config_class = Wav2Vec2Config
853
+ base_model_prefix: str = "wav2vec2"
854
+ main_input_name = "input_values"
855
+ module_class: nn.Module = None
856
+
857
+ def __init__(
858
+ self,
859
+ config: Wav2Vec2Config,
860
+ input_shape: Tuple = (1, 1024),
861
+ seed: int = 0,
862
+ dtype: jnp.dtype = jnp.float32,
863
+ _do_init: bool = True,
864
+ **kwargs,
865
+ ):
866
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
867
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
868
+
869
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
870
+ # init input tensors
871
+ input_values = jnp.zeros(input_shape, dtype="i4")
872
+ attention_mask = jnp.ones_like(input_values)
873
+ params_rng, dropout_rng = jax.random.split(rng, 2)
874
+ rngs = {"params": params_rng, "dropout": dropout_rng}
875
+
876
+ random_params = self.module.init(rngs, input_values, attention_mask, return_dict=False)["params"]
877
+
878
+ if params is not None:
879
+ random_params = flatten_dict(unfreeze(random_params))
880
+ params = flatten_dict(unfreeze(params))
881
+ for missing_key in self._missing_keys:
882
+ params[missing_key] = random_params[missing_key]
883
+ self._missing_keys = set()
884
+ return freeze(unflatten_dict(params))
885
+ else:
886
+ return random_params
887
+
888
+ @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)
889
+ def __call__(
890
+ self,
891
+ input_values,
892
+ attention_mask=None,
893
+ mask_time_indices=None,
894
+ params: dict = None,
895
+ dropout_rng: jax.random.PRNGKey = None,
896
+ train: bool = False,
897
+ output_attentions: Optional[bool] = None,
898
+ output_hidden_states: Optional[bool] = None,
899
+ freeze_feature_encoder: bool = False,
900
+ return_dict: Optional[bool] = None,
901
+ ):
902
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
903
+ output_hidden_states = (
904
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
905
+ )
906
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
907
+
908
+ batch_size, sequence_length = input_values.shape
909
+
910
+ if attention_mask is None:
911
+ attention_mask = jnp.ones((batch_size, sequence_length))
912
+
913
+ # Handle any PRNG if needed
914
+ rngs = {}
915
+ if dropout_rng is not None:
916
+ rngs["dropout"] = dropout_rng
917
+
918
+ inputs = {"params": params or self.params}
919
+
920
+ return self.module.apply(
921
+ inputs,
922
+ jnp.array(input_values, dtype="f4"),
923
+ jnp.array(attention_mask, dtype="i4"),
924
+ mask_time_indices,
925
+ not train,
926
+ output_attentions,
927
+ output_hidden_states,
928
+ freeze_feature_encoder,
929
+ return_dict,
930
+ rngs=rngs,
931
+ )
932
+
933
+ def _get_feat_extract_output_lengths(
934
+ self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None
935
+ ):
936
+ return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter)
937
+
938
+
939
+ class FlaxWav2Vec2Module(nn.Module):
940
+ config: Wav2Vec2Config
941
+ dtype: jnp.dtype = jnp.float32
942
+
943
+ def setup(self):
944
+ self.feature_extractor = FlaxWav2Vec2FeatureEncoder(self.config, dtype=self.dtype)
945
+ self.feature_projection = FlaxWav2Vec2FeatureProjection(self.config, dtype=self.dtype)
946
+ self.masked_spec_embed = self.param(
947
+ "masked_spec_embed", jax.nn.initializers.uniform(), (self.config.hidden_size,)
948
+ )
949
+
950
+ if self.config.do_stable_layer_norm:
951
+ self.encoder = FlaxWav2Vec2StableLayerNormEncoder(self.config, dtype=self.dtype)
952
+ else:
953
+ raise NotImplementedError("``config.do_stable_layer_norm is False`` is currently not supported.")
954
+
955
+ self.adapter = FlaxWav2Vec2Adapter(self.config, dtype=self.dtype) if self.config.add_adapter else None
956
+
957
+ def __call__(
958
+ self,
959
+ input_values,
960
+ attention_mask=None,
961
+ mask_time_indices=None,
962
+ deterministic=True,
963
+ output_attentions=None,
964
+ output_hidden_states=None,
965
+ freeze_feature_encoder=False,
966
+ return_dict=None,
967
+ ):
968
+ extract_features = self.feature_extractor(input_values, freeze_feature_encoder=freeze_feature_encoder)
969
+
970
+ # make sure that no loss is computed on padded inputs
971
+ if attention_mask is not None:
972
+ # compute reduced attention_mask corresponding to feature vectors
973
+ attention_mask = self._get_feature_vector_attention_mask(
974
+ extract_features.shape[1], attention_mask, add_adapter=False
975
+ )
976
+
977
+ hidden_states, extract_features = self.feature_projection(extract_features, deterministic=deterministic)
978
+ if mask_time_indices is not None: # apply SpecAugment along time axis with given indices
979
+ hidden_states = jnp.where(
980
+ jnp.broadcast_to(mask_time_indices[:, :, None], hidden_states.shape),
981
+ jnp.broadcast_to(self.masked_spec_embed[None, None, :], hidden_states.shape),
982
+ hidden_states,
983
+ )
984
+
985
+ encoder_outputs = self.encoder(
986
+ hidden_states,
987
+ attention_mask=attention_mask,
988
+ deterministic=deterministic,
989
+ output_attentions=output_attentions,
990
+ output_hidden_states=output_hidden_states,
991
+ return_dict=return_dict,
992
+ )
993
+
994
+ hidden_states = encoder_outputs[0]
995
+
996
+ if self.adapter is not None:
997
+ hidden_states = self.adapter(hidden_states)
998
+
999
+ if not return_dict:
1000
+ return (hidden_states, extract_features) + encoder_outputs[1:]
1001
+
1002
+ return FlaxWav2Vec2BaseModelOutput(
1003
+ last_hidden_state=hidden_states,
1004
+ extract_features=extract_features,
1005
+ hidden_states=encoder_outputs.hidden_states,
1006
+ attentions=encoder_outputs.attentions,
1007
+ )
1008
+
1009
+ def _get_feat_extract_output_lengths(
1010
+ self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None
1011
+ ):
1012
+ """
1013
+ Computes the output length of the convolutional layers
1014
+ """
1015
+
1016
+ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
1017
+
1018
+ def _conv_out_length(input_length, kernel_size, stride):
1019
+ # 1D convolutional layer output length formula taken
1020
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
1021
+ return (input_length - kernel_size) // stride + 1
1022
+
1023
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
1024
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
1025
+
1026
+ if add_adapter:
1027
+ for _ in range(self.config.num_adapter_layers):
1028
+ input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
1029
+
1030
+ return input_lengths
1031
+
1032
+ def _get_feature_vector_attention_mask(
1033
+ self, feature_vector_length: int, attention_mask: jnp.ndarray, add_adapter=None
1034
+ ):
1035
+ # Effectively attention_mask.sum(-1), but not inplace to be able to run
1036
+ # in inference mode.
1037
+ non_padded_lengths = attention_mask.cumsum(axis=-1)[:, -1]
1038
+
1039
+ output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
1040
+
1041
+ batch_size = attention_mask.shape[0]
1042
+
1043
+ attention_mask = jnp.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype)
1044
+ # these two operations make sure that all values
1045
+ # before the output lengths indices are attended to
1046
+ attention_mask = attention_mask.at[jnp.arange(attention_mask.shape[0]), output_lengths - 1].set(1)
1047
+ attention_mask = jnp.flip(jnp.flip(attention_mask, -1).cumsum(-1), -1).astype("bool")
1048
+ return attention_mask
1049
+
1050
+
1051
+ @add_start_docstrings(
1052
+ "The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.",
1053
+ WAV_2_VEC_2_START_DOCSTRING,
1054
+ )
1055
+ class FlaxWav2Vec2Model(FlaxWav2Vec2PreTrainedModel):
1056
+ module_class = FlaxWav2Vec2Module
1057
+
1058
+
1059
+ FLAX_WAV2VEC2_MODEL_DOCSTRING = """
1060
+ Returns:
1061
+
1062
+ Example:
1063
+
1064
+ ```python
1065
+ >>> from transformers import AutoProcessor, FlaxWav2Vec2Model
1066
+ >>> from datasets import load_dataset
1067
+ >>> import soundfile as sf
1068
+
1069
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-lv60")
1070
+ >>> model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60")
1071
+
1072
+
1073
+ >>> def map_to_array(batch):
1074
+ ... speech, _ = sf.read(batch["file"])
1075
+ ... batch["speech"] = speech
1076
+ ... return batch
1077
+
1078
+
1079
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1080
+ >>> ds = ds.map(map_to_array)
1081
+
1082
+ >>> input_values = processor(
1083
+ ... ds["speech"][0], sampling_rate=16_000, return_tensors="np"
1084
+ ... ).input_values # Batch size 1
1085
+ >>> hidden_states = model(input_values).last_hidden_state
1086
+ ```
1087
+ """
1088
+
1089
+ overwrite_call_docstring(
1090
+ FlaxWav2Vec2Model,
1091
+ WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_MODEL_DOCSTRING,
1092
+ )
1093
+ append_replace_return_docstrings(
1094
+ FlaxWav2Vec2Model, output_type=FlaxWav2Vec2BaseModelOutput, config_class=Wav2Vec2Config
1095
+ )
1096
+
1097
+
1098
+ class FlaxWav2Vec2ForCTCModule(nn.Module):
1099
+ config: Wav2Vec2Config
1100
+ dtype: jnp.dtype = jnp.float32
1101
+
1102
+ def setup(self):
1103
+ self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype)
1104
+ self.dropout = nn.Dropout(rate=self.config.final_dropout)
1105
+ self.lm_head = nn.Dense(
1106
+ self.config.vocab_size,
1107
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
1108
+ dtype=self.dtype,
1109
+ )
1110
+
1111
+ def __call__(
1112
+ self,
1113
+ input_values,
1114
+ attention_mask=None,
1115
+ mask_time_indices=None,
1116
+ deterministic=True,
1117
+ output_attentions=None,
1118
+ output_hidden_states=None,
1119
+ freeze_feature_encoder=False,
1120
+ return_dict=None,
1121
+ ):
1122
+ outputs = self.wav2vec2(
1123
+ input_values,
1124
+ attention_mask=attention_mask,
1125
+ mask_time_indices=mask_time_indices,
1126
+ deterministic=deterministic,
1127
+ output_attentions=output_attentions,
1128
+ output_hidden_states=output_hidden_states,
1129
+ freeze_feature_encoder=freeze_feature_encoder,
1130
+ return_dict=return_dict,
1131
+ )
1132
+
1133
+ hidden_states = outputs[0]
1134
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1135
+
1136
+ logits = self.lm_head(hidden_states)
1137
+
1138
+ if not return_dict:
1139
+ return (logits,) + outputs[2:]
1140
+
1141
+ return FlaxCausalLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
1142
+
1143
+ def _get_feat_extract_output_lengths(
1144
+ self,
1145
+ input_lengths: Union[jnp.ndarray, int],
1146
+ add_adapter: Optional[bool] = None,
1147
+ ):
1148
+ """
1149
+ Computes the output length of the convolutional layers
1150
+ """
1151
+
1152
+ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
1153
+
1154
+ def _conv_out_length(input_length, kernel_size, stride):
1155
+ # 1D convolutional layer output length formula taken
1156
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
1157
+ return (input_length - kernel_size) // stride + 1
1158
+
1159
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
1160
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
1161
+
1162
+ if add_adapter:
1163
+ for _ in range(self.config.num_adapter_layers):
1164
+ input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
1165
+
1166
+ return input_lengths
1167
+
1168
+
1169
+ @add_start_docstrings(
1170
+ "Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).",
1171
+ WAV_2_VEC_2_START_DOCSTRING,
1172
+ )
1173
+ class FlaxWav2Vec2ForCTC(FlaxWav2Vec2PreTrainedModel):
1174
+ module_class = FlaxWav2Vec2ForCTCModule
1175
+
1176
+
1177
+ FLAX_WAV2VEC2_FOR_CTC_DOCSTRING = """
1178
+ Returns:
1179
+
1180
+ Example:
1181
+
1182
+ ```python
1183
+ >>> import jax.numpy as jnp
1184
+ >>> from transformers import AutoProcessor, FlaxWav2Vec2ForCTC
1185
+ >>> from datasets import load_dataset
1186
+ >>> import soundfile as sf
1187
+
1188
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-960h-lv60")
1189
+ >>> model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60")
1190
+
1191
+
1192
+ >>> def map_to_array(batch):
1193
+ ... speech, _ = sf.read(batch["file"])
1194
+ ... batch["speech"] = speech
1195
+ ... return batch
1196
+
1197
+
1198
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1199
+ >>> ds = ds.map(map_to_array)
1200
+
1201
+ >>> input_values = processor(
1202
+ ... ds["speech"][0], sampling_rate=16_000, return_tensors="np"
1203
+ ... ).input_values # Batch size 1
1204
+ >>> logits = model(input_values).logits
1205
+ >>> predicted_ids = jnp.argmax(logits, axis=-1)
1206
+
1207
+ >>> transcription = processor.decode(predicted_ids[0])
1208
+ >>> # should give: "A MAN SAID TO THE UNIVERSE SIR I EXIST"
1209
+ ```
1210
+ """
1211
+
1212
+ overwrite_call_docstring(
1213
+ FlaxWav2Vec2ForCTC,
1214
+ WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_CTC_DOCSTRING,
1215
+ )
1216
+ append_replace_return_docstrings(FlaxWav2Vec2ForCTC, output_type=FlaxCausalLMOutput, config_class=Wav2Vec2Config)
1217
+
1218
+
1219
+ class FlaxWav2Vec2ForPreTrainingModule(nn.Module):
1220
+ config: Wav2Vec2Config
1221
+ dtype: jnp.dtype = jnp.float32
1222
+
1223
+ def setup(self):
1224
+ self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype)
1225
+ self.dropout_features = nn.Dropout(self.config.feat_quantizer_dropout)
1226
+
1227
+ self.quantizer = FlaxWav2Vec2GumbelVectorQuantizer(self.config, dtype=self.dtype)
1228
+ self.project_q = nn.Dense(
1229
+ self.config.proj_codevector_dim,
1230
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
1231
+ dtype=self.dtype,
1232
+ )
1233
+ self.project_hid = nn.Dense(
1234
+ self.config.proj_codevector_dim,
1235
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
1236
+ dtype=self.dtype,
1237
+ )
1238
+
1239
+ def __call__(
1240
+ self,
1241
+ input_values,
1242
+ attention_mask=None,
1243
+ mask_time_indices=None,
1244
+ gumbel_temperature: int = 1,
1245
+ deterministic: bool = True,
1246
+ output_attentions=None,
1247
+ output_hidden_states=None,
1248
+ freeze_feature_encoder=False,
1249
+ return_dict=None,
1250
+ ):
1251
+ r"""
1252
+ Returns:
1253
+
1254
+ Example:
1255
+
1256
+ ```python
1257
+
1258
+ ```"""
1259
+
1260
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1261
+
1262
+ outputs = self.wav2vec2(
1263
+ input_values,
1264
+ attention_mask=attention_mask,
1265
+ output_attentions=output_attentions,
1266
+ output_hidden_states=output_hidden_states,
1267
+ mask_time_indices=mask_time_indices,
1268
+ deterministic=deterministic,
1269
+ freeze_feature_encoder=freeze_feature_encoder,
1270
+ return_dict=return_dict,
1271
+ )
1272
+
1273
+ # project all transformed features (including masked) to final vq dim
1274
+ transformer_features = self.project_hid(outputs[0])
1275
+
1276
+ # quantize all (unmasked) extracted features and project to final vq dim
1277
+ extract_features = self.dropout_features(outputs[1], deterministic=deterministic)
1278
+ quantized_features, codevector_perplexity = self.quantizer(
1279
+ extract_features, mask_time_indices, deterministic=deterministic, temperature=gumbel_temperature
1280
+ )
1281
+ quantized_features = self.project_q(quantized_features)
1282
+
1283
+ if not return_dict:
1284
+ return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
1285
+
1286
+ return FlaxWav2Vec2ForPreTrainingOutput(
1287
+ projected_states=transformer_features,
1288
+ projected_quantized_states=quantized_features,
1289
+ codevector_perplexity=codevector_perplexity,
1290
+ hidden_states=outputs.hidden_states,
1291
+ attentions=outputs.attentions,
1292
+ )
1293
+
1294
+ def _get_feat_extract_output_lengths(
1295
+ self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None
1296
+ ):
1297
+ """
1298
+ Computes the output length of the convolutional layers
1299
+ """
1300
+
1301
+ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
1302
+
1303
+ def _conv_out_length(input_length, kernel_size, stride):
1304
+ # 1D convolutional layer output length formula taken
1305
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
1306
+ return (input_length - kernel_size) // stride + 1
1307
+
1308
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
1309
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
1310
+
1311
+ if add_adapter:
1312
+ for _ in range(self.config.num_adapter_layers):
1313
+ input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
1314
+
1315
+ return input_lengths
1316
+
1317
+
1318
+ @add_start_docstrings("""Wav2Vec2 Model with a quantizer and `VQ` head on top.""", WAV_2_VEC_2_START_DOCSTRING)
1319
+ class FlaxWav2Vec2ForPreTraining(FlaxWav2Vec2PreTrainedModel):
1320
+ module_class = FlaxWav2Vec2ForPreTrainingModule
1321
+
1322
+ @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)
1323
+ # overwrite since has `gumbel_temperature` input
1324
+ def __call__(
1325
+ self,
1326
+ input_values,
1327
+ attention_mask=None,
1328
+ mask_time_indices=None,
1329
+ gumbel_temperature: int = 1,
1330
+ params: dict = None,
1331
+ dropout_rng: jax.random.PRNGKey = None,
1332
+ gumbel_rng: jax.random.PRNGKey = None,
1333
+ train: bool = False,
1334
+ output_attentions: Optional[bool] = None,
1335
+ output_hidden_states: Optional[bool] = None,
1336
+ freeze_feature_encoder: bool = False,
1337
+ return_dict: Optional[bool] = None,
1338
+ ):
1339
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1340
+ output_hidden_states = (
1341
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1342
+ )
1343
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1344
+
1345
+ batch_size, sequence_length = input_values.shape
1346
+
1347
+ if attention_mask is None:
1348
+ attention_mask = jnp.ones((batch_size, sequence_length))
1349
+
1350
+ # Handle any PRNG if needed
1351
+ rngs = {}
1352
+ if dropout_rng is not None:
1353
+ rngs["dropout"] = dropout_rng
1354
+
1355
+ if gumbel_rng is not None:
1356
+ rngs["gumbel"] = gumbel_rng
1357
+
1358
+ inputs = {"params": params or self.params}
1359
+
1360
+ return self.module.apply(
1361
+ inputs,
1362
+ jnp.array(input_values, dtype="f4"),
1363
+ jnp.array(attention_mask, dtype="i4"),
1364
+ mask_time_indices,
1365
+ gumbel_temperature,
1366
+ not train,
1367
+ output_attentions,
1368
+ output_hidden_states,
1369
+ freeze_feature_encoder,
1370
+ return_dict,
1371
+ rngs=rngs,
1372
+ )
1373
+
1374
+
1375
+ FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING = """
1376
+ Returns:
1377
+
1378
+ Example:
1379
+
1380
+ ```python
1381
+ >>> import optax
1382
+ >>> import numpy as np
1383
+ >>> import jax.numpy as jnp
1384
+ >>> from transformers import AutoFeatureExtractor, FlaxWav2Vec2ForPreTraining
1385
+ >>> from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices
1386
+ >>> from datasets import load_dataset
1387
+ >>> import soundfile as sf
1388
+
1389
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-large-lv60")
1390
+ >>> model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60")
1391
+
1392
+
1393
+ >>> def map_to_array(batch):
1394
+ ... speech, _ = sf.read(batch["file"])
1395
+ ... batch["speech"] = speech
1396
+ ... return batch
1397
+
1398
+
1399
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1400
+ >>> ds = ds.map(map_to_array)
1401
+
1402
+ >>> input_values = feature_extractor(ds["speech"][0], return_tensors="np").input_values # Batch size 1
1403
+
1404
+ >>> # compute masked indices
1405
+ >>> batch_size, raw_sequence_length = input_values.shape
1406
+ >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length)
1407
+ >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)
1408
+
1409
+ >>> outputs = model(input_values, mask_time_indices=mask_time_indices)
1410
+
1411
+ >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)
1412
+ >>> cosine_sim = optax.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states)
1413
+
1414
+ >>> # show that cosine similarity is much higher than random
1415
+ >>> assert np.asarray(cosine_sim)[mask_time_indices].mean() > 0.5
1416
+ ```
1417
+ """
1418
+
1419
+ overwrite_call_docstring(
1420
+ FlaxWav2Vec2ForPreTraining,
1421
+ WAV_2_VEC_2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING,
1422
+ )
1423
+ append_replace_return_docstrings(
1424
+ FlaxWav2Vec2ForPreTraining, output_type=FlaxWav2Vec2ForPreTrainingOutput, config_class=Wav2Vec2Config
1425
+ )
1426
+
1427
+
1428
+ __all__ = ["FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel"]
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_tf_wav2vec2.py ADDED
@@ -0,0 +1,1858 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TensorFlow Wav2Vec2 model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ import warnings
20
+ from dataclasses import dataclass
21
+ from typing import Any, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput, TFSequenceClassifierOutput
28
+ from ...modeling_tf_utils import (
29
+ TFPreTrainedModel,
30
+ get_initializer,
31
+ keras,
32
+ keras_serializable,
33
+ unpack_inputs,
34
+ )
35
+ from ...tf_utils import shape_list, stable_softmax
36
+ from ...utils import (
37
+ ModelOutput,
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_wav2vec2 import Wav2Vec2Config
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+
49
+ _HIDDEN_STATES_START_POSITION = 2
50
+
51
+ _CHECKPOINT_FOR_DOC = "facebook/wav2vec2-base-960h"
52
+ _CONFIG_FOR_DOC = "Wav2Vec2Config"
53
+
54
+
55
+ LARGE_NEGATIVE = -1e8
56
+
57
+
58
+ @dataclass
59
+ class TFWav2Vec2BaseModelOutput(ModelOutput):
60
+ """
61
+ Output type of [`TFWav2Vec2BaseModelOutput`], with potential hidden states and attentions.
62
+
63
+ Args:
64
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
65
+ Sequence of hidden-states at the output of the last layer of the model.
66
+ extract_features (`tf.Tensor` of shape `(batch_size, sequence_length, conv_dim[-1])`):
67
+ Sequence of extracted feature vectors of the last convolutional layer of the model.
68
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
69
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
70
+ `(batch_size, sequence_length, hidden_size)`.
71
+
72
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
73
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
74
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
75
+ sequence_length)`.
76
+
77
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
78
+ heads.
79
+ """
80
+
81
+ last_hidden_state: tf.Tensor = None
82
+ extract_features: tf.Tensor = None
83
+ hidden_states: Tuple[tf.Tensor] | None = None
84
+ attentions: Tuple[tf.Tensor] | None = None
85
+
86
+
87
+ def _sample_without_replacement(distribution, num_samples):
88
+ """
89
+ Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see
90
+ https://github.com/tensorflow/tensorflow/issues/9260 for more info
91
+ """
92
+ z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
93
+ _, indices = tf.nn.top_k(distribution + z, num_samples)
94
+ return indices
95
+
96
+
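The helper above relies on the Gumbel-max trick cited in its docstring. A minimal NumPy sketch of top-k sampling without replacement (illustration only; the helper name below is hypothetical and not part of this file):

```python
import numpy as np


def gumbel_top_k(probs, k, rng=np.random.default_rng(0)):
    # Perturb log-probabilities with Gumbel noise and take the k largest entries:
    # the result is k distinct indices drawn with probability proportional to `probs`.
    gumbel = -np.log(-np.log(rng.uniform(size=len(probs))))
    return np.argsort(np.log(probs) + gumbel)[::-1][:k]


print(gumbel_top_k(np.array([0.1, 0.2, 0.3, 0.4]), k=2))
```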
97
+ def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
98
+ """
99
+ Scatter function as in PyTorch with indices in format (batch_dim, indices)
100
+ """
101
+ indices_shape = shape_list(batch_indices)
102
+ # broadcast batch dim to indices_shape
103
+ broad_casted_batch_dims = tf.reshape(
104
+ tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
105
+ )
106
+ # transform batch_indices to pair_indices
107
+ pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
108
+ # scatter values to pair indices
109
+ return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)
110
+
111
+
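A quick sanity check of the scatter helper above, which places each value at its `(batch, index)` position like `torch.Tensor.scatter_`. A sketch, assuming the module-level helper is importable from this file:

```python
import tensorflow as tf

from transformers.models.wav2vec2.modeling_tf_wav2vec2 import _scatter_values_on_batch_indices

values = tf.constant([7.0, 8.0, 9.0, 6.0], shape=(2, 2))  # one value per target index
batch_indices = tf.constant([[0, 3], [1, 2]])             # target column per value
print(_scatter_values_on_batch_indices(values, batch_indices, (2, 4)))
# -> [[7. 0. 0. 8.]
#     [0. 9. 6. 0.]]
```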
112
+ def _compute_mask_indices(
113
+ shape: Tuple[int, int],
114
+ mask_prob: float,
115
+ mask_length: int,
116
+ min_masks: int = 0,
117
+ ) -> tf.Tensor:
118
+ """
119
+ Computes random mask spans for a given shape
120
+
121
+ Args:
122
+ shape: the shape for which to compute masks.
123
+ should be of size 2 where first element is batch size and 2nd is timesteps
124
+ attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
125
+ mask_prob:
126
+ probability for each token to be chosen as start of the span to be masked. this will be multiplied by
127
+ number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
128
+ however due to overlaps, the actual number will be smaller (unless no_overlap is True)
129
+ mask_length: size of the mask
130
+ min_masks: minimum number of masked spans
131
+
132
+ Adapted from [fairseq's
133
+ data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
134
+ """
135
+ batch_size, sequence_length = shape
136
+
137
+ if mask_length < 1:
138
+ raise ValueError("`mask_length` has to be bigger than 0.")
139
+
140
+ tf.debugging.assert_less(
141
+ mask_length,
142
+ sequence_length,
143
+ message=(
144
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and"
145
+ f" `sequence_length`: {sequence_length}"
146
+ ),
147
+ )
148
+
149
+ # compute number of masked spans in batch
150
+ num_masked_spans = mask_prob * tf.cast(sequence_length, tf.float32) / mask_length + tf.random.uniform((1,))
151
+ num_masked_spans = tf.maximum(num_masked_spans, min_masks)
152
+ num_masked_spans = tf.cast(num_masked_spans, tf.int32)
153
+
154
+ # make sure num masked indices <= sequence_length
155
+ num_masked_spans = tf.math.minimum(sequence_length // mask_length, num_masked_spans)
156
+ num_masked_spans = tf.squeeze(num_masked_spans)
157
+
158
+ # SpecAugment mask to fill
159
+ spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32)
160
+
161
+ # uniform distribution to sample from, make sure that offset samples are < sequence_length
162
+ uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1)))
163
+
164
+ # get random indices to mask
165
+ spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans)
166
+
167
+ # expand masked indices to masked spans
168
+ spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1)
169
+ spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length))
170
+ spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))
171
+
172
+ offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :]
173
+ offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1))
174
+ offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length))
175
+
176
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
177
+
178
+ # scatter indices to mask
179
+ spec_aug_mask = _scatter_values_on_batch_indices(
180
+ tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, tf.shape(spec_aug_mask)
181
+ )
182
+
183
+ return spec_aug_mask
184
+
185
+
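A small illustration of the span-mask helper above: roughly `mask_prob * sequence_length / mask_length` spans of `mask_length` consecutive frames are marked for masking. A sketch, assuming the helper is importable from this module:

```python
import tensorflow as tf

from transformers.models.wav2vec2.modeling_tf_wav2vec2 import _compute_mask_indices

tf.random.set_seed(0)
mask = _compute_mask_indices(shape=(2, 20), mask_prob=0.3, mask_length=4)
print(mask)  # (2, 20) tensor, nonzero over the sampled spans
```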
186
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
187
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
188
+ """
189
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
190
+ """
191
+ src_len = shape_list(mask)[1]
192
+ tgt_len = tgt_len if tgt_len is not None else src_len
193
+ one_cst = tf.constant(1.0)
194
+ mask = tf.cast(mask, dtype=one_cst.dtype)
195
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
196
+
197
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
198
+
199
+
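The expansion above follows the usual additive-mask convention: kept positions map to `0.0` and padded positions to `LARGE_NEGATIVE`, broadcast to `[bsz, 1, tgt_len, src_len]` so the result can be added directly to attention logits. A self-contained sketch of the same computation:

```python
import tensorflow as tf

LARGE_NEGATIVE = -1e8
padding_mask = tf.constant([[1.0, 1.0, 0.0]])  # last key position is padding
additive = (1.0 - tf.tile(padding_mask[:, None, None, :], (1, 1, 3, 1))) * LARGE_NEGATIVE
print(additive.shape)  # (1, 1, 3, 3): 0.0 for kept keys, -1e8 for padded keys
```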
200
+ class TFWav2Vec2GroupNorm(keras.layers.Layer):
201
+ """
202
+ From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization
203
+ """
204
+
205
+ def __init__(
206
+ self,
207
+ groups: int = 32,
208
+ axis: int = -1,
209
+ epsilon: float = 1e-3,
210
+ center: bool = True,
211
+ scale: bool = True,
212
+ beta_initializer: keras.initializers.Initializer = "zeros",
213
+ gamma_initializer: keras.initializers.Initializer = "ones",
214
+ beta_regularizer: keras.regularizers.Regularizer = None,
215
+ gamma_regularizer: keras.regularizers.Regularizer = None,
216
+ beta_constraint: keras.constraints.Constraint = None,
217
+ gamma_constraint: keras.constraints.Constraint = None,
218
+ **kwargs,
219
+ ):
220
+ super().__init__(**kwargs)
221
+ self.supports_masking = True
222
+ self.groups = groups
223
+ self.axis = axis
224
+ self.epsilon = epsilon
225
+ self.center = center
226
+ self.scale = scale
227
+ self.beta_initializer = keras.initializers.get(beta_initializer)
228
+ self.gamma_initializer = keras.initializers.get(gamma_initializer)
229
+ self.beta_regularizer = keras.regularizers.get(beta_regularizer)
230
+ self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
231
+ self.beta_constraint = keras.constraints.get(beta_constraint)
232
+ self.gamma_constraint = keras.constraints.get(gamma_constraint)
233
+ self._check_axis()
234
+
235
+ def build(self, input_shape):
236
+ self._check_if_input_shape_is_none(input_shape)
237
+ self._set_number_of_groups_for_instance_norm(input_shape)
238
+ self._check_size_of_dimensions(input_shape)
239
+ self._create_input_spec(input_shape)
240
+
241
+ self._add_gamma_weight(input_shape)
242
+ self._add_beta_weight(input_shape)
243
+ self.built = True
244
+ super().build(input_shape)
245
+
246
+ def call(self, inputs):
247
+ input_shape = keras.backend.int_shape(inputs)
248
+ tensor_input_shape = tf.shape(inputs)
249
+
250
+ reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape)
251
+
252
+ normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
253
+
254
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
255
+ if not is_instance_norm:
256
+ outputs = tf.reshape(normalized_inputs, tensor_input_shape)
257
+ else:
258
+ outputs = normalized_inputs
259
+
260
+ return outputs
261
+
262
+ def get_config(self):
263
+ config = {
264
+ "groups": self.groups,
265
+ "axis": self.axis,
266
+ "epsilon": self.epsilon,
267
+ "center": self.center,
268
+ "scale": self.scale,
269
+ "beta_initializer": keras.initializers.serialize(self.beta_initializer),
270
+ "gamma_initializer": keras.initializers.serialize(self.gamma_initializer),
271
+ "beta_regularizer": keras.regularizers.serialize(self.beta_regularizer),
272
+ "gamma_regularizer": keras.regularizers.serialize(self.gamma_regularizer),
273
+ "beta_constraint": keras.constraints.serialize(self.beta_constraint),
274
+ "gamma_constraint": keras.constraints.serialize(self.gamma_constraint),
275
+ }
276
+ base_config = super().get_config()
277
+ return {**base_config, **config}
278
+
279
+ def compute_output_shape(self, input_shape):
280
+ return input_shape
281
+
282
+ def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
283
+ group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
284
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
285
+ if not is_instance_norm:
286
+ group_shape[self.axis] = input_shape[self.axis] // self.groups
287
+ group_shape.insert(self.axis, self.groups)
288
+ group_shape = tf.stack(group_shape)
289
+ reshaped_inputs = tf.reshape(inputs, group_shape)
290
+ return reshaped_inputs, group_shape
291
+ else:
292
+ return inputs, group_shape
293
+
294
+ def _apply_normalization(self, reshaped_inputs, input_shape):
295
+ group_shape = keras.backend.int_shape(reshaped_inputs)
296
+ group_reduction_axes = list(range(1, len(group_shape)))
297
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
298
+ if not is_instance_norm:
299
+ axis = -2 if self.axis == -1 else self.axis - 1
300
+ else:
301
+ axis = -1 if self.axis == -1 else self.axis - 1
302
+ group_reduction_axes.pop(axis)
303
+
304
+ mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True)
305
+
306
+ gamma, beta = self._get_reshaped_weights(input_shape)
307
+ normalized_inputs = tf.nn.batch_normalization(
308
+ reshaped_inputs,
309
+ mean=mean,
310
+ variance=variance,
311
+ scale=gamma,
312
+ offset=beta,
313
+ variance_epsilon=self.epsilon,
314
+ )
315
+ return normalized_inputs
316
+
317
+ def _get_reshaped_weights(self, input_shape):
318
+ broadcast_shape = self._create_broadcast_shape(input_shape)
319
+ gamma = None
320
+ beta = None
321
+ if self.scale:
322
+ gamma = tf.reshape(self.gamma, broadcast_shape)
323
+
324
+ if self.center:
325
+ beta = tf.reshape(self.beta, broadcast_shape)
326
+ return gamma, beta
327
+
328
+ def _check_if_input_shape_is_none(self, input_shape):
329
+ dim = input_shape[self.axis]
330
+ if dim is None:
331
+ raise ValueError(
332
+ "Axis "
333
+ + str(self.axis)
334
+ + " of input tensor should have a defined dimension but the layer received an input with shape "
335
+ + str(input_shape)
336
+ + "."
337
+ )
338
+
339
+ def _set_number_of_groups_for_instance_norm(self, input_shape):
340
+ dim = input_shape[self.axis]
341
+
342
+ if self.groups == -1:
343
+ self.groups = dim
344
+
345
+ def _check_size_of_dimensions(self, input_shape):
346
+ dim = input_shape[self.axis]
347
+ if dim < self.groups:
348
+ raise ValueError(
349
+ "Number of groups ("
350
+ + str(self.groups)
351
+ + ") cannot be more than the number of channels ("
352
+ + str(dim)
353
+ + ")."
354
+ )
355
+
356
+ if dim % self.groups != 0:
357
+ raise ValueError(
358
+ "Number of groups ("
359
+ + str(self.groups)
360
+ + ") must be a multiple of the number of channels ("
361
+ + str(dim)
362
+ + ")."
363
+ )
364
+
365
+ def _check_axis(self):
366
+ if self.axis == 0:
367
+ raise ValueError(
368
+ "You are trying to normalize your batch axis. Do you want to use tf.layer.batch_normalization instead?"
369
+ )
370
+
371
+ def _create_input_spec(self, input_shape):
372
+ dim = input_shape[self.axis]
373
+ self.input_spec = keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim})
374
+
375
+ def _add_gamma_weight(self, input_shape):
376
+ dim = input_shape[self.axis]
377
+ shape = (dim,)
378
+
379
+ if self.scale:
380
+ self.gamma = self.add_weight(
381
+ shape=shape,
382
+ name="gamma",
383
+ initializer=self.gamma_initializer,
384
+ regularizer=self.gamma_regularizer,
385
+ constraint=self.gamma_constraint,
386
+ )
387
+ else:
388
+ self.gamma = None
389
+
390
+ def _add_beta_weight(self, input_shape):
391
+ dim = input_shape[self.axis]
392
+ shape = (dim,)
393
+
394
+ if self.center:
395
+ self.beta = self.add_weight(
396
+ shape=shape,
397
+ name="beta",
398
+ initializer=self.beta_initializer,
399
+ regularizer=self.beta_regularizer,
400
+ constraint=self.beta_constraint,
401
+ )
402
+ else:
403
+ self.beta = None
404
+
405
+ def _create_broadcast_shape(self, input_shape):
406
+ broadcast_shape = [1] * len(input_shape)
407
+ is_instance_norm = (input_shape[self.axis] // self.groups) == 1
408
+ if not is_instance_norm:
409
+ broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
410
+ broadcast_shape.insert(self.axis, self.groups)
411
+ else:
412
+ broadcast_shape[self.axis] = self.groups
413
+ return broadcast_shape
414
+
415
+
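Usage sketch for the group-normalization layer above: it normalizes groups of channels of a `(batch, time, channels)` feature map; the conv feature extractor below instantiates it with `groups == channels`, i.e. instance normalization. A sketch, assuming the class is importable from this module:

```python
import tensorflow as tf

from transformers.models.wav2vec2.modeling_tf_wav2vec2 import TFWav2Vec2GroupNorm

layer = TFWav2Vec2GroupNorm(groups=4, axis=-1, epsilon=1e-5)
features = tf.random.normal((2, 50, 16))  # 16 channels split into 4 groups of 4
print(layer(features).shape)              # (2, 50, 16)
```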
416
+ class TFWav2Vec2WeightNormConv1D(keras.layers.Conv1D):
417
+ """Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm"""
418
+
419
+ def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs):
420
+ super().__init__(
421
+ filters=filters,
422
+ kernel_size=kernel_size,
423
+ groups=groups,
424
+ padding="valid",
425
+ use_bias=True,
426
+ bias_initializer="he_normal",
427
+ **kwargs,
428
+ )
429
+ self.explicit_padding = explicit_padding
430
+ self.filter_axis = 2
431
+ self.kernel_norm_axes = tf.constant([0, 1])
432
+
433
+ def _init_norm(self):
434
+ """Set the norm of the weight vector."""
435
+ kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes))
436
+ self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis])
437
+
438
+ def _normalize_kernel(self):
439
+ """Generate normalized weights."""
440
+ kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g)
441
+ self.kernel = tf.transpose(kernel)
442
+
443
+ def build(self, input_shape):
444
+ if not self.built:
445
+ super().build(input_shape)
446
+
447
+ self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True)
448
+ self.weight_v = self.kernel
449
+
450
+ self.weight_g = self.add_weight(
451
+ name="weight_g",
452
+ shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1),
453
+ initializer="ones",
454
+ dtype=self.weight_v.dtype,
455
+ trainable=True,
456
+ )
457
+ self._init_norm()
458
+ self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True)
459
+
460
+ def call(self, inputs):
461
+ # TODO Matt: Assigning to attributes in call() is deeply sinful in TensorFlow, as it should be idempotent.
462
+ # This whole layer should be replaced by a layer that doesn't inherit from Conv1D, but instead calls
463
+ # a functional 1d convolution with normalized weights that it generates (but does not store!)
464
+ self._normalize_kernel()
465
+
466
+ padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0)))
467
+ output = super().call(padded_inputs)
468
+
469
+ return output
470
+
471
+
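The weight-normalized convolution above reparameterizes its kernel as a direction times a magnitude, `g * v / ||v||`, recomputed on every call from the trainable `weight_v` and `weight_g`. A minimal sketch of that reparameterization for a single vector:

```python
import tensorflow as tf

v = tf.random.normal((8,))           # unnormalized direction (trainable in the layer)
g = tf.constant(2.0)                 # magnitude (trainable in the layer)
kernel = g * tf.nn.l2_normalize(v)   # effective kernel; its norm equals g
print(tf.norm(kernel).numpy())       # ~2.0
```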
472
+ class TFWav2Vec2NoLayerNormConvLayer(keras.layers.Layer):
473
+ def __init__(self, config: Wav2Vec2Config, layer_id: int = 0, **kwargs: Any) -> None:
474
+ super().__init__(**kwargs)
475
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
476
+ self.out_conv_dim = config.conv_dim[layer_id]
477
+
478
+ self.conv = keras.layers.Conv1D(
479
+ filters=self.out_conv_dim,
480
+ kernel_size=config.conv_kernel[layer_id],
481
+ strides=config.conv_stride[layer_id],
482
+ use_bias=config.conv_bias,
483
+ name="conv",
484
+ )
485
+ self.activation = get_tf_activation(config.feat_extract_activation)
486
+
487
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
488
+ hidden_states = self.conv(hidden_states)
489
+ hidden_states = self.activation(hidden_states)
490
+ return hidden_states
491
+
492
+ def build(self, input_shape=None):
493
+ if self.built:
494
+ return
495
+ self.built = True
496
+ if getattr(self, "conv", None) is not None:
497
+ with tf.name_scope(self.conv.name):
498
+ self.conv.build([None, None, self.in_conv_dim])
499
+
500
+
501
+ class TFWav2Vec2LayerNormConvLayer(keras.layers.Layer):
502
+ def __init__(self, config: Wav2Vec2Config, layer_id: int = 0, **kwargs: Any) -> None:
503
+ super().__init__(**kwargs)
504
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
505
+ self.out_conv_dim = config.conv_dim[layer_id]
506
+
507
+ self.conv = keras.layers.Conv1D(
508
+ filters=self.out_conv_dim,
509
+ kernel_size=config.conv_kernel[layer_id],
510
+ strides=config.conv_stride[layer_id],
511
+ use_bias=config.conv_bias,
512
+ name="conv",
513
+ )
514
+ self.layer_norm = keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps)
515
+ self.activation = get_tf_activation(config.feat_extract_activation)
516
+
517
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
518
+ hidden_states = self.conv(hidden_states)
519
+ hidden_states = self.layer_norm(hidden_states)
520
+ hidden_states = self.activation(hidden_states)
521
+ return hidden_states
522
+
523
+ def build(self, input_shape=None):
524
+ if self.built:
525
+ return
526
+ self.built = True
527
+ if getattr(self, "conv", None) is not None:
528
+ with tf.name_scope(self.conv.name):
529
+ self.conv.build([None, None, self.in_conv_dim])
530
+ if getattr(self, "layer_norm", None) is not None:
531
+ with tf.name_scope(self.layer_norm.name):
532
+ self.layer_norm.build([None, None, self.out_conv_dim])
533
+
534
+
535
+ class TFWav2Vec2GroupNormConvLayer(keras.layers.Layer):
536
+ def __init__(self, config: Wav2Vec2Config, layer_id: int = 0, **kwargs: Any) -> None:
537
+ super().__init__(**kwargs)
538
+ self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
539
+ self.out_conv_dim = config.conv_dim[layer_id]
540
+
541
+ self.conv = keras.layers.Conv1D(
542
+ filters=self.out_conv_dim,
543
+ kernel_size=config.conv_kernel[layer_id],
544
+ strides=config.conv_stride[layer_id],
545
+ use_bias=config.conv_bias,
546
+ name="conv",
547
+ )
548
+ self.activation = get_tf_activation(config.feat_extract_activation)
549
+ self.layer_norm = TFWav2Vec2GroupNorm(
550
+ groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm"
551
+ )
552
+
553
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
554
+ hidden_states = self.conv(hidden_states)
555
+ hidden_states = self.layer_norm(hidden_states)
556
+ hidden_states = self.activation(hidden_states)
557
+ return hidden_states
558
+
559
+ def build(self, input_shape=None):
560
+ if self.built:
561
+ return
562
+ self.built = True
563
+ if getattr(self, "conv", None) is not None:
564
+ with tf.name_scope(self.conv.name):
565
+ self.conv.build([None, None, self.in_conv_dim])
566
+ if getattr(self, "layer_norm", None) is not None:
567
+ with tf.name_scope(self.layer_norm.name):
568
+ self.layer_norm.build([None, None, self.out_conv_dim])
569
+
570
+
571
+ class TFWav2Vec2PositionalConvEmbedding(keras.layers.Layer):
572
+ def __init__(self, config: Wav2Vec2Config, **kwargs: Any) -> None:
573
+ super().__init__(**kwargs)
574
+ self.conv = TFWav2Vec2WeightNormConv1D(
575
+ filters=config.hidden_size,
576
+ kernel_size=config.num_conv_pos_embeddings,
577
+ groups=config.num_conv_pos_embedding_groups,
578
+ explicit_padding=config.num_conv_pos_embeddings // 2,
579
+ name="conv",
580
+ )
581
+ self.padding = TFWav2Vec2SamePadLayer(config.num_conv_pos_embeddings)
582
+ self.activation = get_tf_activation(config.feat_extract_activation)
583
+ self.config = config
584
+
585
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
586
+ hidden_states = self.conv(hidden_states)
587
+ hidden_states = self.padding(hidden_states)
588
+ hidden_states = self.activation(hidden_states)
589
+ return hidden_states
590
+
591
+ def build(self, input_shape=None):
592
+ if self.built:
593
+ return
594
+ self.built = True
595
+ if getattr(self, "conv", None) is not None:
596
+ with tf.name_scope(self.conv.name):
597
+ self.conv.build([None, None, self.config.hidden_size])
598
+
599
+
600
+ class TFWav2Vec2SamePadLayer(keras.layers.Layer):
601
+ def __init__(self, num_conv_pos_embeddings, **kwargs):
602
+ super().__init__(**kwargs)
603
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
604
+
605
+ def call(self, hidden_states):
606
+ if self.num_pad_remove > 0:
607
+ hidden_states = hidden_states[:, : -self.num_pad_remove, :]
608
+ return hidden_states
609
+
610
+
611
+ class TFWav2Vec2FeatureEncoder(keras.layers.Layer):
612
+ def __init__(self, config: Wav2Vec2Config, **kwargs: Any) -> None:
613
+ super().__init__(**kwargs)
614
+
615
+ if config.feat_extract_norm == "group":
616
+ conv_layers = [TFWav2Vec2GroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [
617
+ TFWav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}")
618
+ for i in range(config.num_feat_extract_layers - 1)
619
+ ]
620
+ elif config.feat_extract_norm == "layer":
621
+ conv_layers = [
622
+ TFWav2Vec2LayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}")
623
+ for i in range(config.num_feat_extract_layers)
624
+ ]
625
+ else:
626
+ raise ValueError(
627
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
628
+ )
629
+ self.conv_layers = conv_layers
630
+
631
+ def call(self, input_values):
632
+ hidden_states = tf.expand_dims(input_values, -1)
633
+ for conv_layer in self.conv_layers:
634
+ hidden_states = conv_layer(hidden_states)
635
+ return hidden_states
636
+
637
+ def build(self, input_shape=None):
638
+ if self.built:
639
+ return
640
+ self.built = True
641
+ if getattr(self, "conv_layers", None) is not None:
642
+ for conv_layer in self.conv_layers:
643
+ with tf.name_scope(conv_layer.name):
644
+ conv_layer.build(None)
645
+
646
+
647
+ class TFWav2Vec2FeatureExtractor(TFWav2Vec2FeatureEncoder):
648
+ def __init__(self, config, **kwargs):
649
+ super().__init__(config, **kwargs)
650
+ warnings.warn(
651
+ f"The class `{self.__class__.__name__}` has been deprecated "
652
+ "and will be removed in Transformers v5. "
653
+ f"Use `{self.__class__.__bases__[0].__name__}` instead.",
654
+ FutureWarning,
655
+ )
656
+
657
+
658
+ class TFWav2Vec2FeatureProjection(keras.layers.Layer):
659
+ def __init__(self, config: Wav2Vec2Config, **kwargs):
660
+ super().__init__(**kwargs)
661
+
662
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
663
+ self.projection = keras.layers.Dense(
664
+ units=config.hidden_size,
665
+ kernel_initializer=get_initializer(config.initializer_range),
666
+ bias_initializer="zeros",
667
+ name="projection",
668
+ )
669
+ self.dropout = keras.layers.Dropout(rate=config.feat_proj_dropout)
670
+ self.config = config
671
+
672
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
673
+ norm_hidden_states = self.layer_norm(hidden_states)
674
+ hidden_states = self.projection(norm_hidden_states)
675
+ hidden_states = self.dropout(hidden_states, training=training)
676
+ return hidden_states, norm_hidden_states
677
+
678
+ def build(self, input_shape=None):
679
+ if self.built:
680
+ return
681
+ self.built = True
682
+ if getattr(self, "layer_norm", None) is not None:
683
+ with tf.name_scope(self.layer_norm.name):
684
+ self.layer_norm.build([None, None, self.config.conv_dim[-1]])
685
+ if getattr(self, "projection", None) is not None:
686
+ with tf.name_scope(self.projection.name):
687
+ self.projection.build([None, None, self.config.conv_dim[-1]])
688
+
689
+
690
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFWav2Vec2
691
+ class TFWav2Vec2Attention(keras.layers.Layer):
692
+ """Multi-headed attention from "Attention Is All You Need"""
693
+
694
+ def __init__(
695
+ self,
696
+ embed_dim: int,
697
+ num_heads: int,
698
+ dropout: float = 0.0,
699
+ is_decoder: bool = False,
700
+ bias: bool = True,
701
+ **kwargs,
702
+ ):
703
+ super().__init__(**kwargs)
704
+ self.embed_dim = embed_dim
705
+
706
+ self.num_heads = num_heads
707
+ self.dropout = keras.layers.Dropout(dropout)
708
+ self.head_dim = embed_dim // num_heads
709
+ if (self.head_dim * num_heads) != self.embed_dim:
710
+ raise ValueError(
711
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
712
+ f" and `num_heads`: {num_heads})."
713
+ )
714
+ self.scaling = self.head_dim**-0.5
715
+ self.is_decoder = is_decoder
716
+
717
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
718
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
719
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
720
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
721
+
722
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
723
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
724
+
725
+ def call(
726
+ self,
727
+ hidden_states: tf.Tensor,
728
+ key_value_states: tf.Tensor | None = None,
729
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
730
+ attention_mask: tf.Tensor | None = None,
731
+ layer_head_mask: tf.Tensor | None = None,
732
+ training: Optional[bool] = False,
733
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
734
+ """Input shape: Batch x Time x Channel"""
735
+
736
+ # if key_value_states are provided this layer is used as a cross-attention layer
737
+ # for the decoder
738
+ is_cross_attention = key_value_states is not None
739
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
740
+
741
+ # get query proj
742
+ query_states = self.q_proj(hidden_states) * self.scaling
743
+ # get key, value proj
744
+ if is_cross_attention and past_key_value is not None:
745
+ # reuse k,v, cross_attentions
746
+ key_states = past_key_value[0]
747
+ value_states = past_key_value[1]
748
+ elif is_cross_attention:
749
+ # cross_attentions
750
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
751
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
752
+ elif past_key_value is not None:
753
+ # reuse k, v, self_attention
754
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
755
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
756
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
757
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
758
+ else:
759
+ # self_attention
760
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
761
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
762
+
763
+ if self.is_decoder:
764
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
765
+ # Further calls to cross_attention layer can then reuse all cross-attention
766
+ # key/value_states (first "if" case)
767
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
768
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
769
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
770
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
771
+ past_key_value = (key_states, value_states)
772
+
773
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
774
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
775
+ key_states = tf.reshape(key_states, proj_shape)
776
+ value_states = tf.reshape(value_states, proj_shape)
777
+
778
+ src_len = shape_list(key_states)[1]
779
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
780
+
781
+ tf.debugging.assert_equal(
782
+ shape_list(attn_weights),
783
+ [bsz * self.num_heads, tgt_len, src_len],
784
+ message=(
785
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
786
+ f" {shape_list(attn_weights)}"
787
+ ),
788
+ )
789
+
790
+ if attention_mask is not None:
791
+ tf.debugging.assert_equal(
792
+ shape_list(attention_mask),
793
+ [bsz, 1, tgt_len, src_len],
794
+ message=(
795
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
796
+ f" {shape_list(attention_mask)}"
797
+ ),
798
+ )
799
+
800
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
801
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
802
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
803
+
804
+ attn_weights = stable_softmax(attn_weights, axis=-1)
805
+
806
+ if layer_head_mask is not None:
807
+ tf.debugging.assert_equal(
808
+ shape_list(layer_head_mask),
809
+ [self.num_heads],
810
+ message=(
811
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
812
+ f" {shape_list(layer_head_mask)}"
813
+ ),
814
+ )
815
+
816
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
817
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
818
+ )
819
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
820
+
821
+ attn_probs = self.dropout(attn_weights, training=training)
822
+ attn_output = tf.matmul(attn_probs, value_states)
823
+
824
+ tf.debugging.assert_equal(
825
+ shape_list(attn_output),
826
+ [bsz * self.num_heads, tgt_len, self.head_dim],
827
+ message=(
828
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
829
+ f" {shape_list(attn_output)}"
830
+ ),
831
+ )
832
+
833
+ attn_output = tf.transpose(
834
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
835
+ )
836
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
837
+
838
+ attn_output = self.out_proj(attn_output)
839
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
840
+
841
+ return attn_output, attn_weights, past_key_value
842
+
843
+ def build(self, input_shape=None):
844
+ if self.built:
845
+ return
846
+ self.built = True
847
+ if getattr(self, "k_proj", None) is not None:
848
+ with tf.name_scope(self.k_proj.name):
849
+ self.k_proj.build([None, None, self.embed_dim])
850
+ if getattr(self, "q_proj", None) is not None:
851
+ with tf.name_scope(self.q_proj.name):
852
+ self.q_proj.build([None, None, self.embed_dim])
853
+ if getattr(self, "v_proj", None) is not None:
854
+ with tf.name_scope(self.v_proj.name):
855
+ self.v_proj.build([None, None, self.embed_dim])
856
+ if getattr(self, "out_proj", None) is not None:
857
+ with tf.name_scope(self.out_proj.name):
858
+ self.out_proj.build([None, None, self.embed_dim])
859
+
860
+
861
+ class TFWav2Vec2FeedForward(keras.layers.Layer):
862
+ def __init__(self, config: Wav2Vec2Config, **kwargs):
863
+ super().__init__(**kwargs)
864
+
865
+ self.intermediate_dropout = keras.layers.Dropout(config.activation_dropout)
866
+
867
+ self.intermediate_dense = keras.layers.Dense(
868
+ units=config.intermediate_size,
869
+ kernel_initializer=get_initializer(config.initializer_range),
870
+ bias_initializer="zeros",
871
+ name="intermediate_dense",
872
+ )
873
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
874
+
875
+ self.output_dense = keras.layers.Dense(
876
+ units=config.hidden_size,
877
+ kernel_initializer=get_initializer(config.initializer_range),
878
+ bias_initializer="zeros",
879
+ name="output_dense",
880
+ )
881
+ self.output_dropout = keras.layers.Dropout(config.hidden_dropout)
882
+ self.config = config
883
+
884
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
885
+ hidden_states = self.intermediate_dense(hidden_states)
886
+ hidden_states = self.intermediate_act_fn(hidden_states)
887
+ hidden_states = self.intermediate_dropout(hidden_states, training=training)
888
+
889
+ hidden_states = self.output_dense(hidden_states)
890
+ hidden_states = self.output_dropout(hidden_states, training=training)
891
+ return hidden_states
892
+
893
+ def build(self, input_shape=None):
894
+ if self.built:
895
+ return
896
+ self.built = True
897
+ if getattr(self, "intermediate_dense", None) is not None:
898
+ with tf.name_scope(self.intermediate_dense.name):
899
+ self.intermediate_dense.build([None, None, self.config.hidden_size])
900
+ if getattr(self, "output_dense", None) is not None:
901
+ with tf.name_scope(self.output_dense.name):
902
+ self.output_dense.build([None, None, self.config.intermediate_size])
903
+
904
+
905
+ class TFWav2Vec2EncoderLayer(keras.layers.Layer):
906
+ def __init__(self, config: Wav2Vec2Config, **kwargs):
907
+ super().__init__(**kwargs)
908
+ self.attention = TFWav2Vec2Attention(
909
+ embed_dim=config.hidden_size,
910
+ num_heads=config.num_attention_heads,
911
+ dropout=config.attention_dropout,
912
+ is_decoder=False,
913
+ name="attention",
914
+ )
915
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
916
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
917
+ self.feed_forward = TFWav2Vec2FeedForward(config, name="feed_forward")
918
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
919
+ self.config = config
920
+
921
+ def call(
922
+ self,
923
+ hidden_states: tf.Tensor,
924
+ attention_mask: tf.Tensor | None = None,
925
+ output_attentions: Optional[bool] = False,
926
+ training: bool = False,
927
+ ) -> Tuple[tf.Tensor]:
928
+ attn_residual = hidden_states
929
+ hidden_states, attn_weights, _ = self.attention(
930
+ hidden_states, attention_mask=attention_mask, training=training
931
+ )
932
+ hidden_states = self.dropout(hidden_states, training=training)
933
+ hidden_states = attn_residual + hidden_states
934
+
935
+ hidden_states = self.layer_norm(hidden_states)
936
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
937
+ hidden_states = self.final_layer_norm(hidden_states)
938
+
939
+ outputs = (hidden_states,)
940
+
941
+ if output_attentions:
942
+ outputs += (attn_weights,)
943
+
944
+ return outputs
945
+
946
+ def build(self, input_shape=None):
947
+ if self.built:
948
+ return
949
+ self.built = True
950
+ if getattr(self, "attention", None) is not None:
951
+ with tf.name_scope(self.attention.name):
952
+ self.attention.build(None)
953
+ if getattr(self, "layer_norm", None) is not None:
954
+ with tf.name_scope(self.layer_norm.name):
955
+ self.layer_norm.build([None, None, self.config.hidden_size])
956
+ if getattr(self, "feed_forward", None) is not None:
957
+ with tf.name_scope(self.feed_forward.name):
958
+ self.feed_forward.build(None)
959
+ if getattr(self, "final_layer_norm", None) is not None:
960
+ with tf.name_scope(self.final_layer_norm.name):
961
+ self.final_layer_norm.build([None, None, self.config.hidden_size])
962
+
963
+
964
+ class TFWav2Vec2EncoderLayerStableLayerNorm(keras.layers.Layer):
965
+ def __init__(self, config: Wav2Vec2Config, **kwargs):
966
+ super().__init__(**kwargs)
967
+ self.attention = TFWav2Vec2Attention(
968
+ embed_dim=config.hidden_size,
969
+ num_heads=config.num_attention_heads,
970
+ dropout=config.attention_dropout,
971
+ is_decoder=False,
972
+ name="attention",
973
+ )
974
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
975
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
976
+ self.feed_forward = TFWav2Vec2FeedForward(config, name="feed_forward")
977
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
978
+ self.config = config
979
+
980
+ def call(
981
+ self,
982
+ hidden_states: tf.Tensor,
983
+ attention_mask: tf.Tensor | None = None,
984
+ output_attentions: Optional[bool] = False,
985
+ training: bool = False,
986
+ ) -> Tuple[tf.Tensor]:
987
+ attn_residual = hidden_states
988
+ hidden_states = self.layer_norm(hidden_states)
989
+ hidden_states, attn_weights, _ = self.attention(
990
+ hidden_states, attention_mask=attention_mask, training=training
991
+ )
992
+ hidden_states = self.dropout(hidden_states, training=training)
993
+ hidden_states = attn_residual + hidden_states
994
+ hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
995
+
996
+ outputs = (hidden_states,)
997
+
998
+ if output_attentions:
999
+ outputs += (attn_weights,)
1000
+
1001
+ return outputs
1002
+
1003
+ def build(self, input_shape=None):
1004
+ if self.built:
1005
+ return
1006
+ self.built = True
1007
+ if getattr(self, "attention", None) is not None:
1008
+ with tf.name_scope(self.attention.name):
1009
+ self.attention.build(None)
1010
+ if getattr(self, "layer_norm", None) is not None:
1011
+ with tf.name_scope(self.layer_norm.name):
1012
+ self.layer_norm.build([None, None, self.config.hidden_size])
1013
+ if getattr(self, "feed_forward", None) is not None:
1014
+ with tf.name_scope(self.feed_forward.name):
1015
+ self.feed_forward.build(None)
1016
+ if getattr(self, "final_layer_norm", None) is not None:
1017
+ with tf.name_scope(self.final_layer_norm.name):
1018
+ self.final_layer_norm.build([None, None, self.config.hidden_size])
1019
+
1020
+
1021
+ class TFWav2Vec2Encoder(keras.layers.Layer):
1022
+ def __init__(self, config: Wav2Vec2Config, **kwargs):
1023
+ super().__init__(**kwargs)
1024
+ self.config = config
1025
+ self.pos_conv_embed = TFWav2Vec2PositionalConvEmbedding(config, name="pos_conv_embed")
1026
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1027
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
1028
+ self.layer = [TFWav2Vec2EncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
1029
+
1030
+ def call(
1031
+ self,
1032
+ hidden_states: tf.Tensor,
1033
+ attention_mask: tf.Tensor | None = None,
1034
+ output_attentions: Optional[bool] = False,
1035
+ output_hidden_states: Optional[bool] = False,
1036
+ return_dict: Optional[bool] = True,
1037
+ training: Optional[bool] = False,
1038
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1039
+ all_hidden_states = () if output_hidden_states else None
1040
+ all_self_attentions = () if output_attentions else None
1041
+
1042
+ if attention_mask is not None:
1043
+ hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
1044
+ attention_mask = _expand_mask(attention_mask)
1045
+ else:
1046
+ attention_mask = None
1047
+
1048
+ position_embeddings = self.pos_conv_embed(hidden_states)
1049
+ hidden_states = hidden_states + position_embeddings
1050
+ hidden_states = self.layer_norm(hidden_states)
1051
+ hidden_states = self.dropout(hidden_states, training=training)
1052
+
1053
+ for i, layer_module in enumerate(self.layer):
1054
+ if output_hidden_states:
1055
+ all_hidden_states = all_hidden_states + (hidden_states,)
1056
+
1057
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1058
+ dropout_probability = np.random.uniform(0, 1)
1059
+ if training and (dropout_probability < self.config.layerdrop): # skip the layer
1060
+ continue
1061
+
1062
+ layer_outputs = layer_module(
1063
+ hidden_states=hidden_states,
1064
+ attention_mask=attention_mask,
1065
+ output_attentions=output_attentions,
1066
+ training=training,
1067
+ )
1068
+ hidden_states = layer_outputs[0]
1069
+
1070
+ if output_attentions:
1071
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
1072
+
1073
+ # Add last layer
1074
+ if output_hidden_states:
1075
+ all_hidden_states = all_hidden_states + (hidden_states,)
1076
+
1077
+ if not return_dict:
1078
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
1079
+ return TFBaseModelOutput(
1080
+ last_hidden_state=hidden_states,
1081
+ hidden_states=all_hidden_states,
1082
+ attentions=all_self_attentions,
1083
+ )
1084
+
1085
+ def build(self, input_shape=None):
1086
+ if self.built:
1087
+ return
1088
+ self.built = True
1089
+ if getattr(self, "pos_conv_embed", None) is not None:
1090
+ with tf.name_scope(self.pos_conv_embed.name):
1091
+ self.pos_conv_embed.build(None)
1092
+ if getattr(self, "layer_norm", None) is not None:
1093
+ with tf.name_scope(self.layer_norm.name):
1094
+ self.layer_norm.build([None, None, self.config.hidden_size])
1095
+ if getattr(self, "layer", None) is not None:
1096
+ for layer in self.layer:
1097
+ with tf.name_scope(layer.name):
1098
+ layer.build(None)
1099
+
1100
+
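Both encoder variants in this file apply LayerDrop (https://arxiv.org/abs/1909.11556) inside their layer loops: during training each transformer layer is skipped independently with probability `config.layerdrop`. A minimal sketch of that rule (hypothetical helper, not part of this file):

```python
import numpy as np


def run_layers_with_layerdrop(hidden_states, layers, layerdrop=0.1, training=True):
    for layer in layers:
        if training and np.random.uniform() < layerdrop:
            continue  # skip the whole layer (identity connection)
        hidden_states = layer(hidden_states)
    return hidden_states
```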
1101
+ class TFWav2Vec2EncoderStableLayerNorm(keras.layers.Layer):
1102
+ def __init__(self, config: Wav2Vec2Config, **kwargs):
1103
+ super().__init__(**kwargs)
1104
+ self.config = config
1105
+ self.pos_conv_embed = TFWav2Vec2PositionalConvEmbedding(config, name="pos_conv_embed")
1106
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1107
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
1108
+ self.layer = [
1109
+ TFWav2Vec2EncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
1110
+ ]
1111
+
1112
+ def call(
1113
+ self,
1114
+ hidden_states: tf.Tensor,
1115
+ attention_mask: tf.Tensor | None = None,
1116
+ output_attentions: Optional[bool] = False,
1117
+ output_hidden_states: Optional[bool] = False,
1118
+ return_dict: Optional[bool] = True,
1119
+ training: Optional[bool] = False,
1120
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1121
+ all_hidden_states = () if output_hidden_states else None
1122
+ all_self_attentions = () if output_attentions else None
1123
+
1124
+ if attention_mask is not None:
1125
+ hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
1126
+ attention_mask = _expand_mask(attention_mask)
1127
+ else:
1128
+ attention_mask = None
1129
+
1130
+ position_embeddings = self.pos_conv_embed(hidden_states)
1131
+ hidden_states = hidden_states + position_embeddings
1132
+ hidden_states = self.dropout(hidden_states, training=training)
1133
+
1134
+ for i, layer_module in enumerate(self.layer):
1135
+ if output_hidden_states:
1136
+ all_hidden_states = all_hidden_states + (hidden_states,)
1137
+
1138
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1139
+ dropout_probability = np.random.uniform(0, 1)
1140
+ if training and (dropout_probability < self.config.layerdrop): # skip the layer
1141
+ continue
1142
+
1143
+ layer_outputs = layer_module(
1144
+ hidden_states=hidden_states,
1145
+ attention_mask=attention_mask,
1146
+ output_attentions=output_attentions,
1147
+ training=training,
1148
+ )
1149
+ hidden_states = layer_outputs[0]
1150
+
1151
+ if output_attentions:
1152
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
1153
+
1154
+ hidden_states = self.layer_norm(hidden_states)
1155
+
1156
+ if output_hidden_states:
1157
+ all_hidden_states = all_hidden_states + (hidden_states,)
1158
+
1159
+ if not return_dict:
1160
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
1161
+ return TFBaseModelOutput(
1162
+ last_hidden_state=hidden_states,
1163
+ hidden_states=all_hidden_states,
1164
+ attentions=all_self_attentions,
1165
+ )
1166
+
1167
+ def build(self, input_shape=None):
1168
+ if self.built:
1169
+ return
1170
+ self.built = True
1171
+ if getattr(self, "pos_conv_embed", None) is not None:
1172
+ with tf.name_scope(self.pos_conv_embed.name):
1173
+ self.pos_conv_embed.build(None)
1174
+ if getattr(self, "layer_norm", None) is not None:
1175
+ with tf.name_scope(self.layer_norm.name):
1176
+ self.layer_norm.build([None, None, self.config.hidden_size])
1177
+ if getattr(self, "layer", None) is not None:
1178
+ for layer in self.layer:
1179
+ with tf.name_scope(layer.name):
1180
+ layer.build(None)
1181
+
1182
+
1183
+ @keras_serializable
1184
+ class TFWav2Vec2MainLayer(keras.layers.Layer):
1185
+ config_class = Wav2Vec2Config
1186
+
1187
+ def __init__(self, config: Wav2Vec2Config, **kwargs):
1188
+ super().__init__(**kwargs)
1189
+ self.config = config
1190
+ self.feature_extractor = TFWav2Vec2FeatureEncoder(config, name="feature_extractor")
1191
+ self.feature_projection = TFWav2Vec2FeatureProjection(config, name="feature_projection")
1192
+
1193
+ if config.do_stable_layer_norm:
1194
+ self.encoder = TFWav2Vec2EncoderStableLayerNorm(config, name="encoder")
1195
+ else:
1196
+ self.encoder = TFWav2Vec2Encoder(config, name="encoder")
1197
+
1198
+ def build(self, input_shape=None):
1199
+ if self.built:
1200
+ return
1201
+ self.built = True
1202
+ if self.config.mask_time_prob > 0.0 or self.config.mask_feature_prob > 0.0:
1203
+ self.masked_spec_embed = self.add_weight(
1204
+ shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed"
1205
+ )
1206
+ if getattr(self, "feature_extractor", None) is not None:
1207
+ with tf.name_scope(self.feature_extractor.name):
1208
+ self.feature_extractor.build(None)
1209
+ if getattr(self, "feature_projection", None) is not None:
1210
+ with tf.name_scope(self.feature_projection.name):
1211
+ self.feature_projection.build(None)
1212
+ if getattr(self, "encoder", None) is not None:
1213
+ with tf.name_scope(self.encoder.name):
1214
+ self.encoder.build(None)
1215
+
1216
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
1217
+ """
1218
+ Computes the output length of the convolutional layers
1219
+ """
1220
+
1221
+ def _conv_out_length(input_length, kernel_size, stride):
1222
+ # 1D convolutional layer output length formula taken
1223
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
1224
+ return (input_length - kernel_size) // stride + 1
1225
+
1226
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
1227
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
1228
+
1229
+ return input_lengths
1230
+
1231
+ def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: tf.Tensor | None = None):
1232
+ """
1233
+ Masks extracted features along time axis and/or along feature axis according to
1234
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
1235
+ """
1236
+ batch_size, sequence_length, hidden_size = shape_list(hidden_states)
1237
+
1238
+ # `config.apply_spec_augment` can set masking to False
1239
+ if not getattr(self.config, "apply_spec_augment", True):
1240
+ return hidden_states
1241
+
1242
+ if mask_time_indices is not None:
1243
+ # apply SpecAugment along time axis with given mask_time_indices
1244
+ hidden_states = tf.where(
1245
+ tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
1246
+ self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
1247
+ hidden_states,
1248
+ )
1249
+
1250
+ elif self.config.mask_time_prob > 0:
1251
+ # generate indices & apply SpecAugment along time axis
1252
+ mask_time_indices = _compute_mask_indices(
1253
+ (batch_size, sequence_length),
1254
+ mask_prob=self.config.mask_time_prob,
1255
+ mask_length=self.config.mask_time_length,
1256
+ min_masks=2,
1257
+ )
1258
+ hidden_states = tf.where(
1259
+ tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
1260
+ self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
1261
+ hidden_states,
1262
+ )
1263
+
1264
+ # apply SpecAugment along feature axis
1265
+ if self.config.mask_feature_prob > 0:
1266
+ mask_feature_indices = _compute_mask_indices(
1267
+ (batch_size, hidden_size),
1268
+ mask_prob=self.config.mask_feature_prob,
1269
+ mask_length=self.config.mask_feature_length,
1270
+ )
1271
+ hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0)
1272
+
1273
+ return hidden_states
1274
+
1275
+ @unpack_inputs
1276
+ def call(
1277
+ self,
1278
+ input_values: tf.Tensor,
1279
+ attention_mask: tf.Tensor | None = None,
1280
+ token_type_ids: tf.Tensor | None = None,
1281
+ position_ids: tf.Tensor | None = None,
1282
+ head_mask: tf.Tensor | None = None,
1283
+ inputs_embeds: tf.Tensor | None = None,
1284
+ output_attentions: Optional[bool] = None,
1285
+ output_hidden_states: Optional[bool] = None,
1286
+ return_dict: Optional[bool] = None,
1287
+ training: bool = False,
1288
+ **kwargs: Any,
1289
+ ):
1290
+ extract_features = self.feature_extractor(tf.cast(input_values, tf.float32), training=training)
1291
+ # extract_features = tf.transpose(extract_features, perm=(0, 2, 1))
1292
+
1293
+ if attention_mask is not None:
1294
+ # compute real output lengths according to convolution formula
1295
+ output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, -1))
1296
+
1297
+ attention_mask = tf.sequence_mask(
1298
+ output_lengths, maxlen=shape_list(extract_features)[1], dtype=extract_features.dtype
1299
+ )
1300
+
1301
+ hidden_states, extract_features = self.feature_projection(extract_features, training=training)
1302
+
1303
+ mask_time_indices = kwargs.get("mask_time_indices", None)
1304
+ if training:
1305
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
1306
+
1307
+ encoder_outputs = self.encoder(
1308
+ hidden_states,
1309
+ attention_mask=attention_mask,
1310
+ output_attentions=output_attentions,
1311
+ output_hidden_states=output_hidden_states,
1312
+ return_dict=return_dict,
1313
+ training=training,
1314
+ )
1315
+ hidden_states = encoder_outputs[0]
1316
+
1317
+ if not return_dict:
1318
+ return (hidden_states, extract_features) + encoder_outputs[1:]
1319
+
1320
+ return TFWav2Vec2BaseModelOutput(
1321
+ last_hidden_state=hidden_states,
1322
+ extract_features=extract_features,
1323
+ hidden_states=encoder_outputs.hidden_states,
1324
+ attentions=encoder_outputs.attentions,
1325
+ )
1326
+
1327
+
1328
+ class TFWav2Vec2PreTrainedModel(TFPreTrainedModel):
1329
+ """
1330
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1331
+ models.
1332
+ """
1333
+
1334
+ config_class = Wav2Vec2Config
1335
+ base_model_prefix = "wav2vec2"
1336
+ main_input_name = "input_values"
1337
+
1338
+ @property
1339
+ def input_signature(self):
1340
+ return {
1341
+ "input_values": tf.TensorSpec((None, None), tf.float32, name="input_values"),
1342
+ "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"),
1343
+ }
1344
+
1345
+ @property
1346
+ def dummy_inputs(self):
1347
+ return {
1348
+ "input_values": tf.random.uniform(shape=(1, 500), dtype=tf.float32),
1349
+ "attention_mask": tf.ones(shape=(1, 500), dtype=tf.float32),
1350
+ }
1351
+
1352
+ def __init__(self, config, *inputs, **kwargs):
1353
+ super().__init__(config, *inputs, **kwargs)
1354
+ logger.warning(
1355
+ f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
1356
+ "to train/fine-tune this model, you need a GPU or a TPU"
1357
+ )
1358
+
1359
+ def _get_feat_extract_output_lengths(self, input_lengths, add_adapter=None):
1360
+ """
1361
+ Computes the output length of the convolutional layers
1362
+ """
1363
+ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
1364
+
1365
+ def _conv_out_length(input_length, kernel_size, stride):
1366
+ return tf.math.floordiv(input_length - kernel_size, stride) + 1
1367
+
1368
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
1369
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
1370
+
1371
+ if add_adapter:
1372
+ for _ in range(self.config.num_adapter_layers):
1373
+ input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
1374
+ return input_lengths
1375
+
1376
+ def _get_feature_vector_attention_mask(
1377
+ self, feature_vector_length: int, attention_mask: tf.Tensor, add_adapter=None
1378
+ ):
1379
+ non_padded_lengths = tf.math.cumsum(attention_mask, axis=-1)[:, -1]
1380
+ output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
1381
+ output_lengths = tf.cast(output_lengths, tf.int32)
1382
+ batch_size = tf.shape(attention_mask)[0]
1383
+ # build a zero mask and mark the last valid frame of each example
1384
+ attention_mask = tf.zeros(
1385
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, name="attention_mask"
1386
+ ) # these two operations make sure that all values before the output length indices are attended to
1387
+ # scatter a 1 at index (output_length - 1) for every example in the batch
1388
+ attention_mask = tf.tensor_scatter_nd_update(
1389
+ attention_mask,
1390
+ indices=tf.stack([tf.range(batch_size), output_lengths - 1], axis=1),
1391
+ updates=tf.ones([batch_size], dtype=attention_mask.dtype),
1392
+ )
1393
+ attention_mask = tf.reverse(attention_mask, axis=[-1])
1394
+ attention_mask = tf.cumsum(attention_mask, axis=-1)
1395
+ attention_mask = tf.reverse(attention_mask, axis=[-1])
1396
+ attention_mask = tf.cast(attention_mask, tf.bool)
1397
+ return attention_mask
1398
+
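The scatter/reverse/cumsum idiom in `_get_feature_vector_attention_mask` is easiest to follow on a toy tensor: a single 1 is scattered at the last valid frame of each example, and the reversed cumulative sum then switches on every position up to and including it. A small standalone illustration:

```python
import tensorflow as tf

feature_vector_length = 6
output_lengths = tf.constant([3, 5])  # valid frames per example
batch_size = 2

mask = tf.zeros((batch_size, feature_vector_length), dtype=tf.int32)
mask = tf.tensor_scatter_nd_update(
    mask,
    indices=tf.stack([tf.range(batch_size), output_lengths - 1], axis=1),
    updates=tf.ones([batch_size], dtype=tf.int32),
)
# mask -> [[0, 0, 1, 0, 0, 0],
#          [0, 0, 0, 0, 1, 0]]
mask = tf.reverse(tf.cumsum(tf.reverse(mask, axis=[-1]), axis=-1), axis=[-1])
# mask -> [[1, 1, 1, 0, 0, 0],
#          [1, 1, 1, 1, 1, 0]]
```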
1399
+
1400
+ WAV_2_VEC_2_START_DOCSTRING = r"""
1401
+
1402
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1403
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
1404
+ etc.)
1405
+
1406
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1407
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
1408
+ behavior.
1409
+
1410
+ <Tip>
1411
+
1412
+ TensorFlow models and layers in `transformers` accept two formats as input:
1413
+
1414
+ - having all inputs as keyword arguments (like PyTorch models), or
1415
+ - having all inputs as a list, tuple or dict in the first positional argument.
1416
+
1417
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1418
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1419
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1420
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1421
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1422
+ positional argument:
1423
+
1424
+ - a single Tensor with `input_values` only and nothing else: `model(input_values)`
1425
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1426
+ `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`
1427
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1428
+ `model({"input_values": input_values, "token_type_ids": token_type_ids})`
1429
+
1430
+ Note that when creating models and layers with
1431
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1432
+ about any of this, as you can just pass inputs like you would to any other Python function!
1433
+
1434
+ </Tip>
1435
+
1436
+ Args:
1437
+ config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model.
1438
+ Initializing with a config file does not load the weights associated with the model, only the
1439
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1440
+ """
1441
+
1442
+ WAV_2_VEC_2_INPUTS_DOCSTRING = r"""
1443
+ Args:
1444
+ input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, each example of shape `({0})`):
1445
+ Float values of the input raw speech waveform.
1446
+
1447
+ Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
1448
+ `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`).
1449
+
1450
+ To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `tf.Tensor`.
1451
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1452
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1453
+
1454
+ - 1 for tokens that are **not masked**,
1455
+ - 0 for tokens that are **masked**.
1456
+
1457
+ [What are attention masks?](../glossary#attention-mask)
1458
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1459
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1460
+ 1]`:
1461
+
1462
+ - 0 corresponds to a *sentence A* token,
1463
+ - 1 corresponds to a *sentence B* token.
1464
+
1465
+ [What are token type IDs?](../glossary#token-type-ids)
1466
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1467
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1468
+ config.max_position_embeddings - 1]`.
1469
+
1470
+ [What are position IDs?](../glossary#position-ids)
1471
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1472
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1473
+
1474
+ - 1 indicates the head is **not masked**,
1475
+ - 0 indicates the head is **masked**.
1476
+
1477
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1478
+ Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.
1479
+ This is useful if you want more control over how to convert `input_values` indices into associated vectors
1480
+ than the model's internal embedding lookup matrix.
1481
+ output_attentions (`bool`, *optional*):
1482
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1483
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1484
+ config will be used instead.
1485
+ output_hidden_states (`bool`, *optional*):
1486
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1487
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1488
+ used instead.
1489
+ return_dict (`bool`, *optional*):
1490
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1491
+ eager mode, in graph mode the value will always be set to True.
1492
+ training (`bool`, *optional*, defaults to `False`):
1493
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1494
+ behaviors between training and evaluation).
1495
+ """
1496
+
1497
+
1498
+ @add_start_docstrings(
1499
+ "The bare TFWav2Vec2 Model transformer outputing raw hidden-states without any specific head on top.",
1500
+ WAV_2_VEC_2_START_DOCSTRING,
1501
+ )
1502
+ class TFWav2Vec2Model(TFWav2Vec2PreTrainedModel):
1503
+ def __init__(self, config: Wav2Vec2Config, *inputs, **kwargs):
1504
+ super().__init__(config, *inputs, **kwargs)
1505
+ self.config = config
1506
+ self.wav2vec2 = TFWav2Vec2MainLayer(config, name="wav2vec2")
1507
+
1508
+ @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)
1509
+ @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
1510
+ @unpack_inputs
1511
+ def call(
1512
+ self,
1513
+ input_values: tf.Tensor,
1514
+ attention_mask: tf.Tensor | None = None,
1515
+ token_type_ids: tf.Tensor | None = None,
1516
+ position_ids: tf.Tensor | None = None,
1517
+ head_mask: tf.Tensor | None = None,
1518
+ inputs_embeds: tf.Tensor | None = None,
1519
+ output_attentions: Optional[bool] = None,
1520
+ output_hidden_states: Optional[bool] = None,
1521
+ return_dict: Optional[bool] = None,
1522
+ training: bool = False,
1523
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1524
+ """
1525
+
1526
+ Returns:
1527
+
1528
+ Example:
1529
+
1530
+ ```python
1531
+ >>> from transformers import AutoProcessor, TFWav2Vec2Model
1532
+ >>> from datasets import load_dataset
1533
+ >>> import soundfile as sf
1534
+
1535
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
1536
+ >>> model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
1537
+
1538
+
1539
+ >>> def map_to_array(batch):
1540
+ ... speech, _ = sf.read(batch["file"])
1541
+ ... batch["speech"] = speech
1542
+ ... return batch
1543
+
1544
+
1545
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1546
+ >>> ds = ds.map(map_to_array)
1547
+
1548
+ >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
1549
+ >>> hidden_states = model(input_values).last_hidden_state
1550
+ ```"""
1551
+
1552
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1553
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1554
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1555
+
1556
+ outputs = self.wav2vec2(
1557
+ input_values=input_values,
1558
+ attention_mask=attention_mask,
1559
+ token_type_ids=token_type_ids,
1560
+ position_ids=position_ids,
1561
+ head_mask=head_mask,
1562
+ inputs_embeds=inputs_embeds,
1563
+ output_attentions=output_attentions,
1564
+ output_hidden_states=output_hidden_states,
1565
+ return_dict=return_dict,
1566
+ training=training,
1567
+ )
1568
+
1569
+ return outputs
1570
+
1571
+ def build(self, input_shape=None):
1572
+ if self.built:
1573
+ return
1574
+ self.built = True
1575
+ if getattr(self, "wav2vec2", None) is not None:
1576
+ with tf.name_scope(self.wav2vec2.name):
1577
+ self.wav2vec2.build(None)
1578
+
1579
+
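Complementing the docstring example, a minimal sketch of running a padded batch through the model; the zero arrays are placeholders for real 16 kHz waveforms, and whether an attention mask should actually be passed depends on how the checkpoint was trained (see its model card):

```python
import numpy as np
from transformers import AutoProcessor, TFWav2Vec2Model

processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")

# placeholders for two raw waveforms of different lengths
speech_a = np.zeros(16_000, dtype=np.float32)
speech_b = np.zeros(24_000, dtype=np.float32)

inputs = processor(
    [speech_a, speech_b],
    sampling_rate=16_000,
    padding=True,
    return_attention_mask=True,
    return_tensors="tf",
)
outputs = model(inputs["input_values"], attention_mask=inputs["attention_mask"])
print(outputs.last_hidden_state.shape)  # (2, num_frames, hidden_size)
```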
1580
+ @add_start_docstrings(
1581
+ """TFWav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
1582
+ WAV_2_VEC_2_START_DOCSTRING,
1583
+ )
1584
+ class TFWav2Vec2ForCTC(TFWav2Vec2PreTrainedModel):
1585
+ def __init__(self, config: Wav2Vec2Config, *inputs, **kwargs):
1586
+ super().__init__(config, *inputs, **kwargs)
1587
+
1588
+ self.wav2vec2 = TFWav2Vec2MainLayer(config, name="wav2vec2")
1589
+ self.dropout = keras.layers.Dropout(config.final_dropout)
1590
+ self.lm_head = keras.layers.Dense(config.vocab_size, name="lm_head")
1591
+ self.output_hidden_size = (
1592
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
1593
+ )
1594
+
1595
+ def freeze_feature_extractor(self):
1596
+ """
1597
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1598
+ not be updated during training.
1599
+ """
1600
+ warnings.warn(
1601
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1602
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1603
+ FutureWarning,
1604
+ )
1605
+ self.freeze_feature_encoder()
1606
+
1607
+ def freeze_feature_encoder(self):
1608
+ """
1609
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1610
+ not be updated during training.
1611
+ """
1612
+ self.wav2vec2.feature_extractor.trainable = False
1613
+
1614
+ @unpack_inputs
1615
+ @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)
1616
+ @replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC)
1617
+ def call(
1618
+ self,
1619
+ input_values: tf.Tensor,
1620
+ attention_mask: tf.Tensor | None = None,
1621
+ token_type_ids: tf.Tensor | None = None,
1622
+ position_ids: tf.Tensor | None = None,
1623
+ head_mask: tf.Tensor | None = None,
1624
+ inputs_embeds: tf.Tensor | None = None,
1625
+ output_attentions: Optional[bool] = None,
1626
+ labels: tf.Tensor | None = None,
1627
+ output_hidden_states: Optional[bool] = None,
1628
+ return_dict: Optional[bool] = None,
1629
+ training: Optional[bool] = False,
1630
+ ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
1631
+ r"""
1632
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1633
+ Labels for computing the connectionist temporal classification (CTC) loss. Indices should be in `[-100, 0, ...,
1634
+ config.vocab_size - 1]` (see the `input_values` docstring). Tokens with indices set to `-100` are ignored (masked);
1635
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size - 1]`.
1636
+
1637
+ Returns:
1638
+
1639
+ Example:
1640
+
1641
+ ```python
1642
+ >>> import tensorflow as tf
1643
+ >>> from transformers import AutoProcessor, TFWav2Vec2ForCTC
1644
+ >>> from datasets import load_dataset
1645
+ >>> import soundfile as sf
1646
+
1647
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
1648
+ >>> model = TFWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
1649
+
1650
+
1651
+ >>> def map_to_array(batch):
1652
+ ... speech, _ = sf.read(batch["file"])
1653
+ ... batch["speech"] = speech
1654
+ ... return batch
1655
+
1656
+
1657
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
1658
+ >>> ds = ds.map(map_to_array)
1659
+
1660
+ >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
1661
+ >>> logits = model(input_values).logits
1662
+ >>> predicted_ids = tf.argmax(logits, axis=-1)
1663
+
1664
+ >>> transcription = processor.decode(predicted_ids[0])
1665
+
1666
+ >>> # compute loss
1667
+ >>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST"
1668
+
1669
+ >>> # Pass transcription as `text` to encode labels
1670
+ >>> labels = processor(text=transcription, return_tensors="tf").input_ids
1671
+
1672
+ >>> loss = model(input_values, labels=labels).loss
1673
+ ```"""
1674
+ if labels is not None and tf.reduce_max(labels) >= self.config.vocab_size:
1675
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
1676
+
1677
+ outputs = self.wav2vec2(
1678
+ input_values=input_values,
1679
+ attention_mask=attention_mask,
1680
+ token_type_ids=token_type_ids,
1681
+ position_ids=position_ids,
1682
+ head_mask=head_mask,
1683
+ inputs_embeds=inputs_embeds,
1684
+ output_attentions=output_attentions,
1685
+ output_hidden_states=output_hidden_states,
1686
+ return_dict=return_dict,
1687
+ training=training,
1688
+ )
1689
+ hidden_states = outputs[0]
1690
+ hidden_states = self.dropout(hidden_states, training=training)
1691
+
1692
+ logits = self.lm_head(hidden_states)
1693
+
1694
+ if labels is not None:
1695
+ attention_mask = (
1696
+ attention_mask if attention_mask is not None else tf.ones_like(input_values, dtype=tf.float32)
1697
+ )
1698
+ input_lengths = self.wav2vec2._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1))
1699
+
1700
+ # assuming that padded tokens are filled with -100
1701
+ # when not being attended to
1702
+ labels_mask = tf.cast(labels >= 0, tf.int32)
1703
+ target_lengths = tf.reduce_sum(labels_mask, axis=-1)
1704
+
1705
+ loss = tf.nn.ctc_loss(
1706
+ logits=logits,
1707
+ labels=labels,
1708
+ logit_length=input_lengths,
1709
+ label_length=target_lengths,
1710
+ blank_index=self.config.pad_token_id,
1711
+ logits_time_major=False,
1712
+ )
1713
+
1714
+ if self.config.ctc_loss_reduction == "sum":
1715
+ loss = tf.reduce_sum(loss)
1716
+ if self.config.ctc_loss_reduction == "mean":
1717
+ loss = tf.reduce_mean(loss)
1718
+
1719
+ loss = tf.reshape(loss, (1,))
1720
+ else:
1721
+ loss = None
1722
+
1723
+ if not return_dict:
1724
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1725
+ return ((loss,) + output) if loss is not None else output
1726
+
1727
+ return TFCausalLMOutput(
1728
+ loss=loss,
1729
+ logits=logits,
1730
+ hidden_states=outputs.hidden_states,
1731
+ attentions=outputs.attentions,
1732
+ )
1733
+
1734
+ def build(self, input_shape=None):
1735
+ if self.built:
1736
+ return
1737
+ self.built = True
1738
+ if getattr(self, "wav2vec2", None) is not None:
1739
+ with tf.name_scope(self.wav2vec2.name):
1740
+ self.wav2vec2.build(None)
1741
+ if getattr(self, "lm_head", None) is not None:
1742
+ with tf.name_scope(self.lm_head.name):
1743
+ self.lm_head.build([None, None, self.output_hidden_size])
1744
+
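The `label_length` passed to `tf.nn.ctc_loss` above relies on the convention that padded label positions are filled with -100; a tiny standalone check of that step:

```python
import tensorflow as tf

# two transcriptions of different lengths, right-padded with -100
labels = tf.constant([[12, 5, 9, -100, -100],
                      [ 7, 7, 7,    7,    3]])
labels_mask = tf.cast(labels >= 0, tf.int32)
target_lengths = tf.reduce_sum(labels_mask, axis=-1)
print(target_lengths.numpy())  # [3 5]
```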
1745
+
1746
+ class TFWav2Vec2ForSequenceClassification(TFWav2Vec2PreTrainedModel):
1747
+ def __init__(self, config):
1748
+ super().__init__(config)
1749
+ self.wav2vec2 = TFWav2Vec2MainLayer(config, name="wav2vec2")
1750
+ self.num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
1751
+ with tf.name_scope(self._name_scope()):
1752
+ if config.use_weighted_layer_sum:
1753
+ self.layer_weights = self.add_weight(
1754
+ shape=(self.num_layers,), initializer="ones", trainable=True, name="layer_weights"
1755
+ )
1756
+ self.config = config
1757
+ self.projector = keras.layers.Dense(units=config.classifier_proj_size, name="projector")
1758
+ self.classifier = keras.layers.Dense(units=config.num_labels, activation=None, name="classifier")
1759
+
1760
+ def freeze_feature_extractor(self):
1761
+ """
1762
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1763
+ not be updated during training.
1764
+ """
1765
+ warnings.warn(
1766
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1767
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1768
+ FutureWarning,
1769
+ )
1770
+ self.freeze_feature_encoder()
1771
+
1772
+ def freeze_feature_encoder(self):
1773
+ """
1774
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1775
+ not be updated during training.
1776
+ """
1777
+ self.wav2vec2.feature_extractor.trainable = False
1778
+
1779
+ def freeze_base_model(self):
1780
+ """
1781
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1782
+ be updated during training. Only the classification head will be updated.
1783
+ """
1784
+ for layer in self.wav2vec2.layers:
1785
+ layer.trainable = False
1786
+
1787
+ @unpack_inputs
1788
+ def call(
1789
+ self,
1790
+ input_values: tf.Tensor,
1791
+ attention_mask: tf.Tensor | None = None,
1792
+ output_attentions: bool | None = None,
1793
+ output_hidden_states: bool | None = None,
1794
+ return_dict: bool | None = None,
1795
+ labels: tf.Tensor | None = None,
1796
+ training: bool = False,
1797
+ ) -> TFSequenceClassifierOutput | Tuple[tf.Tensor]:
1798
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1799
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1800
+
1801
+ outputs = self.wav2vec2(
1802
+ input_values,
1803
+ attention_mask=attention_mask,
1804
+ output_attentions=output_attentions,
1805
+ output_hidden_states=output_hidden_states,
1806
+ return_dict=return_dict,
1807
+ training=training,
1808
+ )
1809
+ if self.config.use_weighted_layer_sum:
1810
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1811
+ hidden_states = tf.stack(hidden_states, axis=1)
1812
+ norm_weights = tf.nn.softmax(self.layer_weights, axis=-1)
1813
+ hidden_states = tf.reduce_sum(hidden_states * tf.reshape(norm_weights, [-1, 1, 1]), axis=1)
1814
+ else:
1815
+ hidden_states = outputs[0]
1816
+
1817
+ hidden_states = self.projector(hidden_states)
1818
+ if attention_mask is None:
1819
+ pooled_output = tf.reduce_mean(hidden_states, axis=1)
1820
+ else:
1821
+ padding_mask = self._get_feature_vector_attention_mask(shape_list(hidden_states)[1], attention_mask)
1822
+ padding_mask_float = tf.cast(padding_mask, hidden_states.dtype)
1823
+ hidden_states = tf.multiply(hidden_states, tf.expand_dims(padding_mask_float, axis=-1))
1824
+ pooled_output = tf.divide(
1825
+ tf.reduce_sum(hidden_states, axis=1), tf.expand_dims(tf.reduce_sum(padding_mask_float, axis=1), axis=1)
1826
+ )
1827
+ logits = self.classifier(pooled_output)
1828
+ loss = None
1829
+ if labels is not None:
1830
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
1831
+ loss = loss_fn(tf.reshape(labels, [-1]), tf.reshape(logits, [-1, self.config.num_labels]))
1832
+ if not return_dict:
1833
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1834
+ return ((loss,) + output) if loss is not None else output
1835
+
1836
+ return TFSequenceClassifierOutput(
1837
+ loss=loss,
1838
+ logits=logits,
1839
+ hidden_states=outputs.hidden_states,
1840
+ attentions=outputs.attentions,
1841
+ )
1842
+
1843
+ def build(self, input_shape=None):
1844
+ if self.built:
1845
+ return
1846
+ self.built = True
1847
+ if getattr(self, "wav2vec2", None) is not None:
1848
+ with tf.name_scope(self.wav2vec2.name):
1849
+ self.wav2vec2.build(None)
1850
+ if getattr(self, "projector", None) is not None:
1851
+ with tf.name_scope(self.projector.name):
1852
+ self.projector.build([None, None, self.config.hidden_size])
1853
+ if getattr(self, "classifier", None) is not None:
1854
+ with tf.name_scope(self.classifier.name):
1855
+ self.classifier.build([None, None, self.config.classifier_proj_size])
1856
+
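The masked mean pooling used in the classification head above can be checked on a toy tensor: padded frames are zeroed out and the sum is divided by the number of valid frames per example, so padding never leaks into the pooled vector:

```python
import tensorflow as tf

hidden_states = tf.constant([[[1.0], [3.0], [100.0]]])  # (batch=1, time=3, hidden=1)
padding_mask = tf.constant([[1.0, 1.0, 0.0]])            # the last frame is padding

masked = hidden_states * tf.expand_dims(padding_mask, axis=-1)
pooled = tf.reduce_sum(masked, axis=1) / tf.expand_dims(
    tf.reduce_sum(padding_mask, axis=1), axis=1
)
print(pooled.numpy())  # [[2.]] -- the padded 100.0 frame is ignored
```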
1857
+
1858
+ __all__ = ["TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification"]
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py ADDED
The diff for this file is too large to render. See raw diff
 
.venv/lib/python3.11/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py ADDED
@@ -0,0 +1,186 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Speech processor class for Wav2Vec2
17
+ """
18
+
19
+ import warnings
20
+ from contextlib import contextmanager
21
+ from typing import List, Optional, Union
22
+
23
+ from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
24
+ from ...tokenization_utils_base import AudioInput, PreTokenizedInput, TextInput
25
+ from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
26
+ from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
27
+
28
+
29
+ class Wav2Vec2ProcessorKwargs(ProcessingKwargs, total=False):
30
+ _defaults = {}
31
+
32
+
33
+ class Wav2Vec2Processor(ProcessorMixin):
34
+ r"""
35
+ Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a single
36
+ processor.
37
+
38
+ [`Wav2Vec2Processor`] offers all the functionalities of [`Wav2Vec2FeatureExtractor`] and [`PreTrainedTokenizer`].
39
+ See the docstring of [`~Wav2Vec2Processor.__call__`] and [`~Wav2Vec2Processor.decode`] for more information.
40
+
41
+ Args:
42
+ feature_extractor (`Wav2Vec2FeatureExtractor`):
43
+ An instance of [`Wav2Vec2FeatureExtractor`]. The feature extractor is a required input.
44
+ tokenizer ([`PreTrainedTokenizer`]):
45
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
46
+ """
47
+
48
+ feature_extractor_class = "Wav2Vec2FeatureExtractor"
49
+ tokenizer_class = "AutoTokenizer"
50
+
51
+ def __init__(self, feature_extractor, tokenizer):
52
+ super().__init__(feature_extractor, tokenizer)
53
+ self.current_processor = self.feature_extractor
54
+ self._in_target_context_manager = False
55
+
56
+ @classmethod
57
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
58
+ try:
59
+ return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
60
+ except (OSError, ValueError):
61
+ warnings.warn(
62
+ f"Loading a tokenizer inside {cls.__name__} from a config that does not"
63
+ " include a `tokenizer_class` attribute is deprecated and will be "
64
+ "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
65
+ " attribute to either your `config.json` or `tokenizer_config.json` "
66
+ "file to suppress this warning: ",
67
+ FutureWarning,
68
+ )
69
+
70
+ feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
71
+ tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
72
+
73
+ return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
74
+
75
+ def __call__(
76
+ self,
77
+ audio: AudioInput = None,
78
+ text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]] = None,
79
+ images=None,
80
+ videos=None,
81
+ **kwargs: Unpack[Wav2Vec2ProcessorKwargs],
82
+ ):
83
+ """
84
+ When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor's
85
+ [`~Wav2Vec2FeatureExtractor.__call__`] and returns its output. If used in the context
86
+ [`~Wav2Vec2Processor.as_target_processor`] this method forwards all its arguments to PreTrainedTokenizer's
87
+ [`~PreTrainedTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
88
+ """
89
+
90
+ if "raw_speech" in kwargs:
91
+ warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
92
+ audio = kwargs.pop("raw_speech")
93
+
94
+ if audio is None and text is None:
95
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
96
+
97
+ output_kwargs = self._merge_kwargs(
98
+ Wav2Vec2ProcessorKwargs,
99
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
100
+ **kwargs,
101
+ )
102
+ # For backward compatibility
103
+ if self._in_target_context_manager:
104
+ return self.current_processor(
105
+ audio,
106
+ **output_kwargs["audio_kwargs"],
107
+ **output_kwargs["text_kwargs"],
108
+ **output_kwargs["common_kwargs"],
109
+ )
110
+
111
+ if audio is not None:
112
+ inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"])
113
+ if text is not None:
114
+ encodings = self.tokenizer(text, **output_kwargs["text_kwargs"])
115
+
116
+ if text is None:
117
+ return inputs
118
+ elif audio is None:
119
+ return encodings
120
+ else:
121
+ inputs["labels"] = encodings["input_ids"]
122
+ return inputs
123
+
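A short sketch of the combined audio-plus-text path through `__call__`; the zero array is a placeholder for one second of real 16 kHz speech:

```python
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

speech = np.zeros(16_000, dtype=np.float32)  # placeholder waveform
batch = processor(audio=speech, sampling_rate=16_000, text="HELLO WORLD", return_tensors="tf")

# batch["input_values"] holds the audio features,
# batch["labels"] holds the tokenized transcription ids
```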
124
+ def pad(self, *args, **kwargs):
125
+ """
126
+ When used in normal mode, this method forwards all its arguments to Wav2Vec2FeatureExtractor's
127
+ [`~Wav2Vec2FeatureExtractor.pad`] and returns its output. If used in the context
128
+ [`~Wav2Vec2Processor.as_target_processor`] this method forwards all its arguments to PreTrainedTokenizer's
129
+ [`~PreTrainedTokenizer.pad`]. Please refer to the docstring of the above two methods for more information.
130
+ """
131
+ # For backward compatibility
132
+ if self._in_target_context_manager:
133
+ return self.current_processor.pad(*args, **kwargs)
134
+
135
+ input_features = kwargs.pop("input_features", None)
136
+ labels = kwargs.pop("labels", None)
137
+ if len(args) > 0:
138
+ input_features = args[0]
139
+ args = args[1:]
140
+
141
+ if input_features is not None:
142
+ input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
143
+ if labels is not None:
144
+ labels = self.tokenizer.pad(labels, **kwargs)
145
+
146
+ if labels is None:
147
+ return input_features
148
+ elif input_features is None:
149
+ return labels
150
+ else:
151
+ input_features["labels"] = labels["input_ids"]
152
+ return input_features
153
+
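And a matching sketch of `pad` as it might be used in a data collator, padding features and labels independently before attaching the padded label ids (in a real CTC training loop the padded label positions would typically be replaced with -100 afterwards):

```python
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

# two examples of different lengths, as a collator would receive them
features = [
    processor(audio=np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, text="HELLO"),
    processor(audio=np.zeros(24_000, dtype=np.float32), sampling_rate=16_000, text="WORLD WORLD"),
]

input_features = [{"input_values": f["input_values"][0]} for f in features]
label_features = [{"input_ids": f["labels"]} for f in features]

batch = processor.pad(
    input_features,
    labels=label_features,
    padding=True,
    return_tensors="tf",
)
# batch["input_values"] and batch["labels"] are now rectangular, padded tensors
```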
154
+ def batch_decode(self, *args, **kwargs):
155
+ """
156
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
157
+ refer to the docstring of this method for more information.
158
+ """
159
+ return self.tokenizer.batch_decode(*args, **kwargs)
160
+
161
+ def decode(self, *args, **kwargs):
162
+ """
163
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
164
+ to the docstring of this method for more information.
165
+ """
166
+ return self.tokenizer.decode(*args, **kwargs)
167
+
168
+ @contextmanager
169
+ def as_target_processor(self):
170
+ """
171
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
172
+ Wav2Vec2.
173
+ """
174
+ warnings.warn(
175
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
176
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
177
+ "your audio inputs, or in a separate call."
178
+ )
179
+ self._in_target_context_manager = True
180
+ self.current_processor = self.tokenizer
181
+ yield
182
+ self.current_processor = self.feature_extractor
183
+ self._in_target_context_manager = False
184
+
185
+
186
+ __all__ = ["Wav2Vec2Processor"]