Student0809 committed
Commit da4d9dc · verified · 1 Parent(s): b9950ae

Add files using upload-large-folder tool

Files changed (20)
  1. docs/transformers/build/lib/transformers/models/regnet/modeling_tf_regnet.py +611 -0
  2. docs/transformers/build/lib/transformers/models/rembert/__init__.py +30 -0
  3. docs/transformers/build/lib/transformers/models/rembert/modeling_tf_rembert.py +1721 -0
  4. docs/transformers/build/lib/transformers/models/rembert/tokenization_rembert.py +267 -0
  5. docs/transformers/build/lib/transformers/models/rembert/tokenization_rembert_fast.py +232 -0
  6. docs/transformers/build/lib/transformers/models/resnet/__init__.py +29 -0
  7. docs/transformers/build/lib/transformers/models/resnet/configuration_resnet.py +136 -0
  8. docs/transformers/build/lib/transformers/models/resnet/convert_resnet_to_pytorch.py +199 -0
  9. docs/transformers/build/lib/transformers/models/resnet/modeling_flax_resnet.py +704 -0
  10. docs/transformers/build/lib/transformers/models/resnet/modeling_resnet.py +520 -0
  11. docs/transformers/build/lib/transformers/models/resnet/modeling_tf_resnet.py +596 -0
  12. docs/transformers/build/lib/transformers/models/roberta/__init__.py +31 -0
  13. docs/transformers/build/lib/transformers/models/roberta/configuration_roberta.py +155 -0
  14. docs/transformers/build/lib/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py +177 -0
  15. docs/transformers/build/lib/transformers/models/roberta/modeling_flax_roberta.py +1500 -0
  16. docs/transformers/build/lib/transformers/models/roberta/modeling_roberta.py +1698 -0
  17. docs/transformers/build/lib/transformers/models/roberta/modeling_tf_roberta.py +1783 -0
  18. docs/transformers/build/lib/transformers/models/roberta/tokenization_roberta_fast.py +264 -0
  19. docs/transformers/build/lib/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +157 -0
  20. docs/transformers/build/lib/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py +1808 -0
docs/transformers/build/lib/transformers/models/regnet/modeling_tf_regnet.py ADDED
@@ -0,0 +1,611 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TensorFlow RegNet model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import tensorflow as tf
20
+
21
+ from ...activations_tf import ACT2FN
22
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
23
+ from ...modeling_tf_outputs import (
24
+ TFBaseModelOutputWithNoAttention,
25
+ TFBaseModelOutputWithPoolingAndNoAttention,
26
+ TFSequenceClassifierOutput,
27
+ )
28
+ from ...modeling_tf_utils import (
29
+ TFPreTrainedModel,
30
+ TFSequenceClassificationLoss,
31
+ keras,
32
+ keras_serializable,
33
+ unpack_inputs,
34
+ )
35
+ from ...tf_utils import shape_list
36
+ from ...utils import logging
37
+ from .configuration_regnet import RegNetConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ # General docstring
43
+ _CONFIG_FOR_DOC = "RegNetConfig"
44
+
45
+ # Base docstring
46
+ _CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
47
+ _EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
48
+
49
+ # Image classification docstring
50
+ _IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
51
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
52
+
53
+
54
+ class TFRegNetConvLayer(keras.layers.Layer):
55
+ def __init__(
56
+ self,
57
+ in_channels: int,
58
+ out_channels: int,
59
+ kernel_size: int = 3,
60
+ stride: int = 1,
61
+ groups: int = 1,
62
+ activation: Optional[str] = "relu",
63
+ **kwargs,
64
+ ):
65
+ super().__init__(**kwargs)
66
+ # The padding and conv have been verified in
67
+ # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
68
+ self.padding = keras.layers.ZeroPadding2D(padding=kernel_size // 2)
69
+ self.convolution = keras.layers.Conv2D(
70
+ filters=out_channels,
71
+ kernel_size=kernel_size,
72
+ strides=stride,
73
+ padding="VALID",
74
+ groups=groups,
75
+ use_bias=False,
76
+ name="convolution",
77
+ )
78
+ self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
79
+ self.activation = ACT2FN[activation] if activation is not None else tf.identity
80
+ self.in_channels = in_channels
81
+ self.out_channels = out_channels
82
+
83
+ def call(self, hidden_state):
84
+ hidden_state = self.convolution(self.padding(hidden_state))
85
+ hidden_state = self.normalization(hidden_state)
86
+ hidden_state = self.activation(hidden_state)
87
+ return hidden_state
88
+
89
+ def build(self, input_shape=None):
90
+ if self.built:
91
+ return
92
+ self.built = True
93
+ if getattr(self, "convolution", None) is not None:
94
+ with tf.name_scope(self.convolution.name):
95
+ self.convolution.build([None, None, None, self.in_channels])
96
+ if getattr(self, "normalization", None) is not None:
97
+ with tf.name_scope(self.normalization.name):
98
+ self.normalization.build([None, None, None, self.out_channels])
99
+
100
+
101
+ class TFRegNetEmbeddings(keras.layers.Layer):
102
+ """
103
+ RegNet Embeddings (stem) composed of a single aggressive convolution.
104
+ """
105
+
106
+ def __init__(self, config: RegNetConfig, **kwargs):
107
+ super().__init__(**kwargs)
108
+ self.num_channels = config.num_channels
109
+ self.embedder = TFRegNetConvLayer(
110
+ in_channels=config.num_channels,
111
+ out_channels=config.embedding_size,
112
+ kernel_size=3,
113
+ stride=2,
114
+ activation=config.hidden_act,
115
+ name="embedder",
116
+ )
117
+
118
+ def call(self, pixel_values):
119
+ num_channels = shape_list(pixel_values)[1]
120
+ if tf.executing_eagerly() and num_channels != self.num_channels:
121
+ raise ValueError(
122
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
123
+ )
124
+
125
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
126
+ # So change the input format from `NCHW` to `NHWC`.
127
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
128
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
129
+ hidden_state = self.embedder(pixel_values)
130
+ return hidden_state
131
+
132
+ def build(self, input_shape=None):
133
+ if self.built:
134
+ return
135
+ self.built = True
136
+ if getattr(self, "embedder", None) is not None:
137
+ with tf.name_scope(self.embedder.name):
138
+ self.embedder.build(None)
139
+
140
+
141
+ class TFRegNetShortCut(keras.layers.Layer):
142
+ """
143
+ RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
144
+ downsample the input using `stride=2`.
145
+ """
146
+
147
+ def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs):
148
+ super().__init__(**kwargs)
149
+ self.convolution = keras.layers.Conv2D(
150
+ filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
151
+ )
152
+ self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
153
+ self.in_channels = in_channels
154
+ self.out_channels = out_channels
155
+
156
+ def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
157
+ return self.normalization(self.convolution(inputs), training=training)
158
+
159
+ def build(self, input_shape=None):
160
+ if self.built:
161
+ return
162
+ self.built = True
163
+ if getattr(self, "convolution", None) is not None:
164
+ with tf.name_scope(self.convolution.name):
165
+ self.convolution.build([None, None, None, self.in_channels])
166
+ if getattr(self, "normalization", None) is not None:
167
+ with tf.name_scope(self.normalization.name):
168
+ self.normalization.build([None, None, None, self.out_channels])
169
+
170
+
171
+ class TFRegNetSELayer(keras.layers.Layer):
172
+ """
173
+ Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
174
+ """
175
+
176
+ def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
177
+ super().__init__(**kwargs)
178
+ self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
179
+ self.attention = [
180
+ keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
181
+ keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
182
+ ]
183
+ self.in_channels = in_channels
184
+ self.reduced_channels = reduced_channels
185
+
186
+ def call(self, hidden_state):
187
+ # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
188
+ pooled = self.pooler(hidden_state)
189
+ for layer_module in self.attention:
190
+ pooled = layer_module(pooled)
191
+ hidden_state = hidden_state * pooled
192
+ return hidden_state
193
+
194
+ def build(self, input_shape=None):
195
+ if self.built:
196
+ return
197
+ self.built = True
198
+ if getattr(self, "pooler", None) is not None:
199
+ with tf.name_scope(self.pooler.name):
200
+ self.pooler.build((None, None, None, None))
201
+ if getattr(self, "attention", None) is not None:
202
+ with tf.name_scope(self.attention[0].name):
203
+ self.attention[0].build([None, None, None, self.in_channels])
204
+ with tf.name_scope(self.attention[1].name):
205
+ self.attention[1].build([None, None, None, self.reduced_channels])
206
+
207
+
208
+ class TFRegNetXLayer(keras.layers.Layer):
209
+ """
210
+ RegNet's layer composed of a `1x1`, a grouped `3x3`, and a `1x1` convolution, same as a ResNet bottleneck layer with reduction = 1.
211
+ """
212
+
213
+ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
214
+ super().__init__(**kwargs)
215
+ should_apply_shortcut = in_channels != out_channels or stride != 1
216
+ groups = max(1, out_channels // config.groups_width)
217
+ self.shortcut = (
218
+ TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut")
219
+ if should_apply_shortcut
220
+ else keras.layers.Activation("linear", name="shortcut")
221
+ )
222
+ # `self.layers` instead of `self.layer` because that is a reserved argument.
223
+ self.layers = [
224
+ TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
225
+ TFRegNetConvLayer(
226
+ out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
227
+ ),
228
+ TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.2"),
229
+ ]
230
+ self.activation = ACT2FN[config.hidden_act]
231
+
232
+ def call(self, hidden_state):
233
+ residual = hidden_state
234
+ for layer_module in self.layers:
235
+ hidden_state = layer_module(hidden_state)
236
+ residual = self.shortcut(residual)
237
+ hidden_state += residual
238
+ hidden_state = self.activation(hidden_state)
239
+ return hidden_state
240
+
241
+ def build(self, input_shape=None):
242
+ if self.built:
243
+ return
244
+ self.built = True
245
+ if getattr(self, "shortcut", None) is not None:
246
+ with tf.name_scope(self.shortcut.name):
247
+ self.shortcut.build(None)
248
+ if getattr(self, "layers", None) is not None:
249
+ for layer in self.layers:
250
+ with tf.name_scope(layer.name):
251
+ layer.build(None)
252
+
253
+
254
+ class TFRegNetYLayer(keras.layers.Layer):
255
+ """
256
+ RegNet's Y layer: an X layer with Squeeze and Excitation.
257
+ """
258
+
259
+ def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
260
+ super().__init__(**kwargs)
261
+ should_apply_shortcut = in_channels != out_channels or stride != 1
262
+ groups = max(1, out_channels // config.groups_width)
263
+ self.shortcut = (
264
+ TFRegNetShortCut(in_channels, out_channels, stride=stride, name="shortcut")
265
+ if should_apply_shortcut
266
+ else keras.layers.Activation("linear", name="shortcut")
267
+ )
268
+ self.layers = [
269
+ TFRegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
270
+ TFRegNetConvLayer(
271
+ out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
272
+ ),
273
+ TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
274
+ TFRegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None, name="layer.3"),
275
+ ]
276
+ self.activation = ACT2FN[config.hidden_act]
277
+
278
+ def call(self, hidden_state):
279
+ residual = hidden_state
280
+ for layer_module in self.layers:
281
+ hidden_state = layer_module(hidden_state)
282
+ residual = self.shortcut(residual)
283
+ hidden_state += residual
284
+ hidden_state = self.activation(hidden_state)
285
+ return hidden_state
286
+
287
+ def build(self, input_shape=None):
288
+ if self.built:
289
+ return
290
+ self.built = True
291
+ if getattr(self, "shortcut", None) is not None:
292
+ with tf.name_scope(self.shortcut.name):
293
+ self.shortcut.build(None)
294
+ if getattr(self, "layers", None) is not None:
295
+ for layer in self.layers:
296
+ with tf.name_scope(layer.name):
297
+ layer.build(None)
298
+
299
+
300
+ class TFRegNetStage(keras.layers.Layer):
301
+ """
302
+ A RegNet stage composed of stacked layers.
303
+ """
304
+
305
+ def __init__(
306
+ self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
307
+ ):
308
+ super().__init__(**kwargs)
309
+
310
+ layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
311
+ self.layers = [
312
+ # downsampling is done in the first layer with stride of 2
313
+ layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
314
+ *[layer(config, out_channels, out_channels, name=f"layers.{i + 1}") for i in range(depth - 1)],
315
+ ]
316
+
317
+ def call(self, hidden_state):
318
+ for layer_module in self.layers:
319
+ hidden_state = layer_module(hidden_state)
320
+ return hidden_state
321
+
322
+ def build(self, input_shape=None):
323
+ if self.built:
324
+ return
325
+ self.built = True
326
+ if getattr(self, "layers", None) is not None:
327
+ for layer in self.layers:
328
+ with tf.name_scope(layer.name):
329
+ layer.build(None)
330
+
331
+
332
+ class TFRegNetEncoder(keras.layers.Layer):
333
+ def __init__(self, config: RegNetConfig, **kwargs):
334
+ super().__init__(**kwargs)
335
+ self.stages = []
336
+ # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
337
+ self.stages.append(
338
+ TFRegNetStage(
339
+ config,
340
+ config.embedding_size,
341
+ config.hidden_sizes[0],
342
+ stride=2 if config.downsample_in_first_stage else 1,
343
+ depth=config.depths[0],
344
+ name="stages.0",
345
+ )
346
+ )
347
+ in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
348
+ for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
349
+ self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i + 1}"))
350
+
351
+ def call(
352
+ self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
353
+ ) -> TFBaseModelOutputWithNoAttention:
354
+ hidden_states = () if output_hidden_states else None
355
+
356
+ for stage_module in self.stages:
357
+ if output_hidden_states:
358
+ hidden_states = hidden_states + (hidden_state,)
359
+
360
+ hidden_state = stage_module(hidden_state)
361
+
362
+ if output_hidden_states:
363
+ hidden_states = hidden_states + (hidden_state,)
364
+
365
+ if not return_dict:
366
+ return tuple(v for v in [hidden_state, hidden_states] if v is not None)
367
+
368
+ return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
369
+
370
+ def build(self, input_shape=None):
371
+ if self.built:
372
+ return
373
+ self.built = True
374
+ for stage in self.stages:
375
+ with tf.name_scope(stage.name):
376
+ stage.build(None)
377
+
378
+
379
+ @keras_serializable
380
+ class TFRegNetMainLayer(keras.layers.Layer):
381
+ config_class = RegNetConfig
382
+
383
+ def __init__(self, config, **kwargs):
384
+ super().__init__(**kwargs)
385
+ self.config = config
386
+ self.embedder = TFRegNetEmbeddings(config, name="embedder")
387
+ self.encoder = TFRegNetEncoder(config, name="encoder")
388
+ self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
389
+
390
+ @unpack_inputs
391
+ def call(
392
+ self,
393
+ pixel_values: tf.Tensor,
394
+ output_hidden_states: Optional[bool] = None,
395
+ return_dict: Optional[bool] = None,
396
+ training: bool = False,
397
+ ) -> TFBaseModelOutputWithPoolingAndNoAttention:
398
+ output_hidden_states = (
399
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
400
+ )
401
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
402
+
403
+ embedding_output = self.embedder(pixel_values, training=training)
404
+
405
+ encoder_outputs = self.encoder(
406
+ embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
407
+ )
408
+
409
+ last_hidden_state = encoder_outputs[0]
410
+ pooled_output = self.pooler(last_hidden_state)
411
+
412
+ # Change to NCHW output format to have uniformity in the modules
413
+ pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
414
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
415
+
416
+ # Change the other hidden state outputs to NCHW as well
417
+ if output_hidden_states:
418
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
419
+
420
+ if not return_dict:
421
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
422
+
423
+ return TFBaseModelOutputWithPoolingAndNoAttention(
424
+ last_hidden_state=last_hidden_state,
425
+ pooler_output=pooled_output,
426
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
427
+ )
428
+
429
+ def build(self, input_shape=None):
430
+ if self.built:
431
+ return
432
+ self.built = True
433
+ if getattr(self, "embedder", None) is not None:
434
+ with tf.name_scope(self.embedder.name):
435
+ self.embedder.build(None)
436
+ if getattr(self, "encoder", None) is not None:
437
+ with tf.name_scope(self.encoder.name):
438
+ self.encoder.build(None)
439
+ if getattr(self, "pooler", None) is not None:
440
+ with tf.name_scope(self.pooler.name):
441
+ self.pooler.build((None, None, None, None))
442
+
443
+
444
+ class TFRegNetPreTrainedModel(TFPreTrainedModel):
445
+ """
446
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
447
+ models.
448
+ """
449
+
450
+ config_class = RegNetConfig
451
+ base_model_prefix = "regnet"
452
+ main_input_name = "pixel_values"
453
+
454
+ @property
455
+ def input_signature(self):
456
+ return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
457
+
458
+
459
+ REGNET_START_DOCSTRING = r"""
460
+ This model is a Tensorflow
461
+ [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
462
+ regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
463
+ behavior.
464
+
465
+ Parameters:
466
+ config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
467
+ Initializing with a config file does not load the weights associated with the model, only the
468
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
469
+ """
470
+
471
+ REGNET_INPUTS_DOCSTRING = r"""
472
+ Args:
473
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
474
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
475
+ [`ConvNextImageProcessor.__call__`] for details.
476
+ output_hidden_states (`bool`, *optional*):
477
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
478
+ more detail.
479
+ return_dict (`bool`, *optional*):
480
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
481
+ """
482
+
483
+
484
+ @add_start_docstrings(
485
+ "The bare RegNet model outputting raw features without any specific head on top.",
486
+ REGNET_START_DOCSTRING,
487
+ )
488
+ class TFRegNetModel(TFRegNetPreTrainedModel):
489
+ def __init__(self, config: RegNetConfig, *inputs, **kwargs):
490
+ super().__init__(config, *inputs, **kwargs)
491
+ self.regnet = TFRegNetMainLayer(config, name="regnet")
492
+
493
+ @unpack_inputs
494
+ @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
495
+ @add_code_sample_docstrings(
496
+ checkpoint=_CHECKPOINT_FOR_DOC,
497
+ output_type=TFBaseModelOutputWithPoolingAndNoAttention,
498
+ config_class=_CONFIG_FOR_DOC,
499
+ modality="vision",
500
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
501
+ )
502
+ def call(
503
+ self,
504
+ pixel_values: tf.Tensor,
505
+ output_hidden_states: Optional[bool] = None,
506
+ return_dict: Optional[bool] = None,
507
+ training: bool = False,
508
+ ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
509
+ output_hidden_states = (
510
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
511
+ )
512
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
513
+
514
+ outputs = self.regnet(
515
+ pixel_values=pixel_values,
516
+ output_hidden_states=output_hidden_states,
517
+ return_dict=return_dict,
518
+ training=training,
519
+ )
520
+ if not return_dict:
521
+ return (outputs[0],) + outputs[1:]
522
+
523
+ return TFBaseModelOutputWithPoolingAndNoAttention(
524
+ last_hidden_state=outputs.last_hidden_state,
525
+ pooler_output=outputs.pooler_output,
526
+ hidden_states=outputs.hidden_states,
527
+ )
528
+
529
+ def build(self, input_shape=None):
530
+ if self.built:
531
+ return
532
+ self.built = True
533
+ if getattr(self, "regnet", None) is not None:
534
+ with tf.name_scope(self.regnet.name):
535
+ self.regnet.build(None)
536
+
537
+
538
+ @add_start_docstrings(
539
+ """
540
+ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
541
+ ImageNet.
542
+ """,
543
+ REGNET_START_DOCSTRING,
544
+ )
545
+ class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
546
+ def __init__(self, config: RegNetConfig, *inputs, **kwargs):
547
+ super().__init__(config, *inputs, **kwargs)
548
+ self.num_labels = config.num_labels
549
+ self.regnet = TFRegNetMainLayer(config, name="regnet")
550
+ # classification head
551
+ self.classifier = [
552
+ keras.layers.Flatten(),
553
+ keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
554
+ ]
555
+
556
+ @unpack_inputs
557
+ @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
558
+ @add_code_sample_docstrings(
559
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
560
+ output_type=TFSequenceClassifierOutput,
561
+ config_class=_CONFIG_FOR_DOC,
562
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
563
+ )
564
+ def call(
565
+ self,
566
+ pixel_values: Optional[tf.Tensor] = None,
567
+ labels: Optional[tf.Tensor] = None,
568
+ output_hidden_states: Optional[bool] = None,
569
+ return_dict: Optional[bool] = None,
570
+ training: bool = False,
571
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
572
+ r"""
573
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
574
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
575
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
576
+ """
577
+ output_hidden_states = (
578
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
579
+ )
580
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
581
+
582
+ outputs = self.regnet(
583
+ pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
584
+ )
585
+
586
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
587
+
588
+ flattened_output = self.classifier[0](pooled_output)
589
+ logits = self.classifier[1](flattened_output)
590
+
591
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
592
+
593
+ if not return_dict:
594
+ output = (logits,) + outputs[2:]
595
+ return ((loss,) + output) if loss is not None else output
596
+
597
+ return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
598
+
599
+ def build(self, input_shape=None):
600
+ if self.built:
601
+ return
602
+ self.built = True
603
+ if getattr(self, "regnet", None) is not None:
604
+ with tf.name_scope(self.regnet.name):
605
+ self.regnet.build(None)
606
+ if getattr(self, "classifier", None) is not None:
607
+ with tf.name_scope(self.classifier[1].name):
608
+ self.classifier[1].build([None, None, None, self.config.hidden_sizes[-1]])
609
+
610
+
611
+ __all__ = ["TFRegNetForImageClassification", "TFRegNetModel", "TFRegNetPreTrainedModel"]
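For orientation, here is a minimal usage sketch (not part of this commit) showing how the `TFRegNetForImageClassification` class exported above is typically called. The checkpoint name comes from the `_IMAGE_CLASS_CHECKPOINT` constant in this file; the image URL is an arbitrary example.

# Minimal sketch; assumes transformers, tensorflow, Pillow and requests are installed.
import requests
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # any RGB image works
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

# The processor returns `pixel_values` in NCHW; TFRegNetEmbeddings transposes to NHWC internally.
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)

predicted_class = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class])  # expected: "tabby, tabby cat" per the docstring constant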
docs/transformers/build/lib/transformers/models/rembert/__init__.py ADDED
@@ -0,0 +1,30 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_rembert import *
22
+ from .modeling_rembert import *
23
+ from .modeling_tf_rembert import *
24
+ from .tokenization_rembert import *
25
+ from .tokenization_rembert_fast import *
26
+ else:
27
+ import sys
28
+
29
+ _file = globals()["__file__"]
30
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
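For context (not part of this commit), a short sketch of what the `_LazyModule` registration above provides: importing the package itself is cheap, and the heavy modeling/tokenization submodules are loaded only when one of the re-exported names is first accessed. The exact classes available depend on which backends are installed.

# Sketch of the lazy-import behaviour; assumes transformers is installed.
import transformers.models.rembert as rembert  # cheap: no modeling code is imported yet

config = rembert.RemBertConfig()  # attribute access lazily imports configuration_rembert
print(config.hidden_size)

# TF or PyTorch model classes resolve the same way, but only if the backend is available:
# model = rembert.TFRemBertModel(config)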
docs/transformers/build/lib/transformers/models/rembert/modeling_tf_rembert.py ADDED
@@ -0,0 +1,1721 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TF 2.0 RemBERT model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ import math
20
+ from typing import Dict, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import (
27
+ TFBaseModelOutputWithPastAndCrossAttentions,
28
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
29
+ TFCausalLMOutputWithCrossAttentions,
30
+ TFMaskedLMOutput,
31
+ TFMultipleChoiceModelOutput,
32
+ TFQuestionAnsweringModelOutput,
33
+ TFSequenceClassifierOutput,
34
+ TFTokenClassifierOutput,
35
+ )
36
+ from ...modeling_tf_utils import (
37
+ TFCausalLanguageModelingLoss,
38
+ TFMaskedLanguageModelingLoss,
39
+ TFModelInputType,
40
+ TFMultipleChoiceLoss,
41
+ TFPreTrainedModel,
42
+ TFQuestionAnsweringLoss,
43
+ TFSequenceClassificationLoss,
44
+ TFTokenClassificationLoss,
45
+ get_initializer,
46
+ keras,
47
+ keras_serializable,
48
+ unpack_inputs,
49
+ )
50
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
51
+ from ...utils import (
52
+ add_code_sample_docstrings,
53
+ add_start_docstrings,
54
+ add_start_docstrings_to_model_forward,
55
+ logging,
56
+ )
57
+ from .configuration_rembert import RemBertConfig
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+ _CONFIG_FOR_DOC = "RemBertConfig"
63
+
64
+
65
+ class TFRemBertEmbeddings(keras.layers.Layer):
66
+ """Construct the embeddings from word, position and token_type embeddings."""
67
+
68
+ def __init__(self, config: RemBertConfig, **kwargs):
69
+ super().__init__(**kwargs)
70
+
71
+ self.config = config
72
+ self.input_embedding_size = config.input_embedding_size
73
+ self.max_position_embeddings = config.max_position_embeddings
74
+ self.initializer_range = config.initializer_range
75
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
76
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
77
+
78
+ def build(self, input_shape=None):
79
+ with tf.name_scope("word_embeddings"):
80
+ self.weight = self.add_weight(
81
+ name="weight",
82
+ shape=[self.config.vocab_size, self.input_embedding_size],
83
+ initializer=get_initializer(self.initializer_range),
84
+ )
85
+
86
+ with tf.name_scope("token_type_embeddings"):
87
+ self.token_type_embeddings = self.add_weight(
88
+ name="embeddings",
89
+ shape=[self.config.type_vocab_size, self.input_embedding_size],
90
+ initializer=get_initializer(self.initializer_range),
91
+ )
92
+
93
+ with tf.name_scope("position_embeddings"):
94
+ self.position_embeddings = self.add_weight(
95
+ name="embeddings",
96
+ shape=[self.max_position_embeddings, self.input_embedding_size],
97
+ initializer=get_initializer(self.initializer_range),
98
+ )
99
+
100
+ if self.built:
101
+ return
102
+ self.built = True
103
+ if getattr(self, "LayerNorm", None) is not None:
104
+ with tf.name_scope(self.LayerNorm.name):
105
+ self.LayerNorm.build([None, None, self.config.input_embedding_size])
106
+
107
+ def call(
108
+ self,
109
+ input_ids: Optional[tf.Tensor] = None,
110
+ position_ids: Optional[tf.Tensor] = None,
111
+ token_type_ids: Optional[tf.Tensor] = None,
112
+ inputs_embeds: Optional[tf.Tensor] = None,
113
+ past_key_values_length=0,
114
+ training: bool = False,
115
+ ) -> tf.Tensor:
116
+ """
117
+ Applies embedding based on inputs tensor.
118
+
119
+ Returns:
120
+ final_embeddings (`tf.Tensor`): output embedding tensor.
121
+ """
122
+ assert not (input_ids is None and inputs_embeds is None)
123
+
124
+ if input_ids is not None:
125
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
126
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
127
+
128
+ input_shape = shape_list(inputs_embeds)[:-1]
129
+
130
+ if token_type_ids is None:
131
+ token_type_ids = tf.fill(dims=input_shape, value=0)
132
+
133
+ if position_ids is None:
134
+ position_ids = tf.expand_dims(
135
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
136
+ )
137
+
138
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
139
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
140
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
141
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
142
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
143
+
144
+ return final_embeddings
145
+
146
+
147
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->RemBert
148
+ class TFRemBertSelfAttention(keras.layers.Layer):
149
+ def __init__(self, config: RemBertConfig, **kwargs):
150
+ super().__init__(**kwargs)
151
+
152
+ if config.hidden_size % config.num_attention_heads != 0:
153
+ raise ValueError(
154
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
155
+ f"of attention heads ({config.num_attention_heads})"
156
+ )
157
+
158
+ self.num_attention_heads = config.num_attention_heads
159
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
160
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
161
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
162
+
163
+ self.query = keras.layers.Dense(
164
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
165
+ )
166
+ self.key = keras.layers.Dense(
167
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
168
+ )
169
+ self.value = keras.layers.Dense(
170
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
171
+ )
172
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
173
+
174
+ self.is_decoder = config.is_decoder
175
+ self.config = config
176
+
177
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
178
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
179
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
180
+
181
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
182
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
183
+
184
+ def call(
185
+ self,
186
+ hidden_states: tf.Tensor,
187
+ attention_mask: tf.Tensor,
188
+ head_mask: tf.Tensor,
189
+ encoder_hidden_states: tf.Tensor,
190
+ encoder_attention_mask: tf.Tensor,
191
+ past_key_value: Tuple[tf.Tensor],
192
+ output_attentions: bool,
193
+ training: bool = False,
194
+ ) -> Tuple[tf.Tensor]:
195
+ batch_size = shape_list(hidden_states)[0]
196
+ mixed_query_layer = self.query(inputs=hidden_states)
197
+
198
+ # If this is instantiated as a cross-attention module, the keys
199
+ # and values come from an encoder; the attention mask needs to be
200
+ # such that the encoder's padding tokens are not attended to.
201
+ is_cross_attention = encoder_hidden_states is not None
202
+
203
+ if is_cross_attention and past_key_value is not None:
204
+ # reuse k,v, cross_attentions
205
+ key_layer = past_key_value[0]
206
+ value_layer = past_key_value[1]
207
+ attention_mask = encoder_attention_mask
208
+ elif is_cross_attention:
209
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
210
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
211
+ attention_mask = encoder_attention_mask
212
+ elif past_key_value is not None:
213
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
214
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
215
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
216
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
217
+ else:
218
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
219
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
220
+
221
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
222
+
223
+ if self.is_decoder:
224
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
225
+ # Further calls to cross_attention layer can then reuse all cross-attention
226
+ # key/value_states (first "if" case)
227
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
228
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
229
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
230
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
231
+ past_key_value = (key_layer, value_layer)
232
+
233
+ # Take the dot product between "query" and "key" to get the raw attention scores.
234
+ # (batch size, num_heads, seq_len_q, seq_len_k)
235
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
236
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
237
+ attention_scores = tf.divide(attention_scores, dk)
238
+
239
+ if attention_mask is not None:
240
+ # Apply the attention mask (precomputed for all layers in the TFRemBertModel call() function)
241
+ attention_scores = tf.add(attention_scores, attention_mask)
242
+
243
+ # Normalize the attention scores to probabilities.
244
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
245
+
246
+ # This is actually dropping out entire tokens to attend to, which might
247
+ # seem a bit unusual, but is taken from the original Transformer paper.
248
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
249
+
250
+ # Mask heads if we want to
251
+ if head_mask is not None:
252
+ attention_probs = tf.multiply(attention_probs, head_mask)
253
+
254
+ attention_output = tf.matmul(attention_probs, value_layer)
255
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
256
+
257
+ # (batch_size, seq_len_q, all_head_size)
258
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
259
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
260
+
261
+ if self.is_decoder:
262
+ outputs = outputs + (past_key_value,)
263
+ return outputs
264
+
265
+ def build(self, input_shape=None):
266
+ if self.built:
267
+ return
268
+ self.built = True
269
+ if getattr(self, "query", None) is not None:
270
+ with tf.name_scope(self.query.name):
271
+ self.query.build([None, None, self.config.hidden_size])
272
+ if getattr(self, "key", None) is not None:
273
+ with tf.name_scope(self.key.name):
274
+ self.key.build([None, None, self.config.hidden_size])
275
+ if getattr(self, "value", None) is not None:
276
+ with tf.name_scope(self.value.name):
277
+ self.value.build([None, None, self.config.hidden_size])
278
+
279
+
280
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->RemBert
281
+ class TFRemBertSelfOutput(keras.layers.Layer):
282
+ def __init__(self, config: RemBertConfig, **kwargs):
283
+ super().__init__(**kwargs)
284
+
285
+ self.dense = keras.layers.Dense(
286
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
287
+ )
288
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
289
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
290
+ self.config = config
291
+
292
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
293
+ hidden_states = self.dense(inputs=hidden_states)
294
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
295
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
296
+
297
+ return hidden_states
298
+
299
+ def build(self, input_shape=None):
300
+ if self.built:
301
+ return
302
+ self.built = True
303
+ if getattr(self, "dense", None) is not None:
304
+ with tf.name_scope(self.dense.name):
305
+ self.dense.build([None, None, self.config.hidden_size])
306
+ if getattr(self, "LayerNorm", None) is not None:
307
+ with tf.name_scope(self.LayerNorm.name):
308
+ self.LayerNorm.build([None, None, self.config.hidden_size])
309
+
310
+
311
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->RemBert
312
+ class TFRemBertAttention(keras.layers.Layer):
313
+ def __init__(self, config: RemBertConfig, **kwargs):
314
+ super().__init__(**kwargs)
315
+
316
+ self.self_attention = TFRemBertSelfAttention(config, name="self")
317
+ self.dense_output = TFRemBertSelfOutput(config, name="output")
318
+
319
+ def prune_heads(self, heads):
320
+ raise NotImplementedError
321
+
322
+ def call(
323
+ self,
324
+ input_tensor: tf.Tensor,
325
+ attention_mask: tf.Tensor,
326
+ head_mask: tf.Tensor,
327
+ encoder_hidden_states: tf.Tensor,
328
+ encoder_attention_mask: tf.Tensor,
329
+ past_key_value: Tuple[tf.Tensor],
330
+ output_attentions: bool,
331
+ training: bool = False,
332
+ ) -> Tuple[tf.Tensor]:
333
+ self_outputs = self.self_attention(
334
+ hidden_states=input_tensor,
335
+ attention_mask=attention_mask,
336
+ head_mask=head_mask,
337
+ encoder_hidden_states=encoder_hidden_states,
338
+ encoder_attention_mask=encoder_attention_mask,
339
+ past_key_value=past_key_value,
340
+ output_attentions=output_attentions,
341
+ training=training,
342
+ )
343
+ attention_output = self.dense_output(
344
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
345
+ )
346
+ # add attentions (possibly with past_key_value) if we output them
347
+ outputs = (attention_output,) + self_outputs[1:]
348
+
349
+ return outputs
350
+
351
+ def build(self, input_shape=None):
352
+ if self.built:
353
+ return
354
+ self.built = True
355
+ if getattr(self, "self_attention", None) is not None:
356
+ with tf.name_scope(self.self_attention.name):
357
+ self.self_attention.build(None)
358
+ if getattr(self, "dense_output", None) is not None:
359
+ with tf.name_scope(self.dense_output.name):
360
+ self.dense_output.build(None)
361
+
362
+
363
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->RemBert
364
+ class TFRemBertIntermediate(keras.layers.Layer):
365
+ def __init__(self, config: RemBertConfig, **kwargs):
366
+ super().__init__(**kwargs)
367
+
368
+ self.dense = keras.layers.Dense(
369
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
370
+ )
371
+
372
+ if isinstance(config.hidden_act, str):
373
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
374
+ else:
375
+ self.intermediate_act_fn = config.hidden_act
376
+ self.config = config
377
+
378
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
379
+ hidden_states = self.dense(inputs=hidden_states)
380
+ hidden_states = self.intermediate_act_fn(hidden_states)
381
+
382
+ return hidden_states
383
+
384
+ def build(self, input_shape=None):
385
+ if self.built:
386
+ return
387
+ self.built = True
388
+ if getattr(self, "dense", None) is not None:
389
+ with tf.name_scope(self.dense.name):
390
+ self.dense.build([None, None, self.config.hidden_size])
391
+
392
+
393
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->RemBert
394
+ class TFRemBertOutput(keras.layers.Layer):
395
+ def __init__(self, config: RemBertConfig, **kwargs):
396
+ super().__init__(**kwargs)
397
+
398
+ self.dense = keras.layers.Dense(
399
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
400
+ )
401
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
402
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
403
+ self.config = config
404
+
405
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
406
+ hidden_states = self.dense(inputs=hidden_states)
407
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
408
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
409
+
410
+ return hidden_states
411
+
412
+ def build(self, input_shape=None):
413
+ if self.built:
414
+ return
415
+ self.built = True
416
+ if getattr(self, "dense", None) is not None:
417
+ with tf.name_scope(self.dense.name):
418
+ self.dense.build([None, None, self.config.intermediate_size])
419
+ if getattr(self, "LayerNorm", None) is not None:
420
+ with tf.name_scope(self.LayerNorm.name):
421
+ self.LayerNorm.build([None, None, self.config.hidden_size])
422
+
423
+
424
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->RemBert
425
+ class TFRemBertLayer(keras.layers.Layer):
426
+ def __init__(self, config: RemBertConfig, **kwargs):
427
+ super().__init__(**kwargs)
428
+
429
+ self.attention = TFRemBertAttention(config, name="attention")
430
+ self.is_decoder = config.is_decoder
431
+ self.add_cross_attention = config.add_cross_attention
432
+ if self.add_cross_attention:
433
+ if not self.is_decoder:
434
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
435
+ self.crossattention = TFRemBertAttention(config, name="crossattention")
436
+ self.intermediate = TFRemBertIntermediate(config, name="intermediate")
437
+ self.bert_output = TFRemBertOutput(config, name="output")
438
+
439
+ def call(
440
+ self,
441
+ hidden_states: tf.Tensor,
442
+ attention_mask: tf.Tensor,
443
+ head_mask: tf.Tensor,
444
+ encoder_hidden_states: tf.Tensor | None,
445
+ encoder_attention_mask: tf.Tensor | None,
446
+ past_key_value: Tuple[tf.Tensor] | None,
447
+ output_attentions: bool,
448
+ training: bool = False,
449
+ ) -> Tuple[tf.Tensor]:
450
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
451
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
452
+ self_attention_outputs = self.attention(
453
+ input_tensor=hidden_states,
454
+ attention_mask=attention_mask,
455
+ head_mask=head_mask,
456
+ encoder_hidden_states=None,
457
+ encoder_attention_mask=None,
458
+ past_key_value=self_attn_past_key_value,
459
+ output_attentions=output_attentions,
460
+ training=training,
461
+ )
462
+ attention_output = self_attention_outputs[0]
463
+
464
+ # if decoder, the last output is tuple of self-attn cache
465
+ if self.is_decoder:
466
+ outputs = self_attention_outputs[1:-1]
467
+ present_key_value = self_attention_outputs[-1]
468
+ else:
469
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
470
+
471
+ cross_attn_present_key_value = None
472
+ if self.is_decoder and encoder_hidden_states is not None:
473
+ if not hasattr(self, "crossattention"):
474
+ raise ValueError(
475
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
476
+ " by setting `config.add_cross_attention=True`"
477
+ )
478
+
479
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
480
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
481
+ cross_attention_outputs = self.crossattention(
482
+ input_tensor=attention_output,
483
+ attention_mask=attention_mask,
484
+ head_mask=head_mask,
485
+ encoder_hidden_states=encoder_hidden_states,
486
+ encoder_attention_mask=encoder_attention_mask,
487
+ past_key_value=cross_attn_past_key_value,
488
+ output_attentions=output_attentions,
489
+ training=training,
490
+ )
491
+ attention_output = cross_attention_outputs[0]
492
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
493
+
494
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
495
+ cross_attn_present_key_value = cross_attention_outputs[-1]
496
+ present_key_value = present_key_value + cross_attn_present_key_value
497
+
498
+ intermediate_output = self.intermediate(hidden_states=attention_output)
499
+ layer_output = self.bert_output(
500
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
501
+ )
502
+ outputs = (layer_output,) + outputs # add attentions if we output them
503
+
504
+ # if decoder, return the attn key/values as the last output
505
+ if self.is_decoder:
506
+ outputs = outputs + (present_key_value,)
507
+
508
+ return outputs
509
+
510
+ def build(self, input_shape=None):
511
+ if self.built:
512
+ return
513
+ self.built = True
514
+ if getattr(self, "attention", None) is not None:
515
+ with tf.name_scope(self.attention.name):
516
+ self.attention.build(None)
517
+ if getattr(self, "intermediate", None) is not None:
518
+ with tf.name_scope(self.intermediate.name):
519
+ self.intermediate.build(None)
520
+ if getattr(self, "bert_output", None) is not None:
521
+ with tf.name_scope(self.bert_output.name):
522
+ self.bert_output.build(None)
523
+ if getattr(self, "crossattention", None) is not None:
524
+ with tf.name_scope(self.crossattention.name):
525
+ self.crossattention.build(None)
526
+
527
+
528
+ class TFRemBertEncoder(keras.layers.Layer):
529
+ def __init__(self, config: RemBertConfig, **kwargs):
530
+ super().__init__(**kwargs)
531
+ self.config = config
532
+
533
+ self.embedding_hidden_mapping_in = keras.layers.Dense(
534
+ units=config.hidden_size,
535
+ kernel_initializer=get_initializer(config.initializer_range),
536
+ name="embedding_hidden_mapping_in",
537
+ )
538
+ self.layer = [TFRemBertLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)]
539
+
540
+ def call(
541
+ self,
542
+ hidden_states: tf.Tensor,
543
+ attention_mask: tf.Tensor,
544
+ head_mask: tf.Tensor,
545
+ encoder_hidden_states: tf.Tensor,
546
+ encoder_attention_mask: tf.Tensor,
547
+ past_key_values: Tuple[Tuple[tf.Tensor]],
548
+ use_cache: bool,
549
+ output_attentions: bool,
550
+ output_hidden_states: bool,
551
+ return_dict: bool,
552
+ training: bool = False,
553
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
554
+ hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states)
555
+ all_hidden_states = () if output_hidden_states else None
556
+ all_attentions = () if output_attentions else None
557
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
558
+
559
+ next_decoder_cache = () if use_cache else None
560
+ for i, layer_module in enumerate(self.layer):
561
+ if output_hidden_states:
562
+ all_hidden_states = all_hidden_states + (hidden_states,)
563
+
564
+ past_key_value = past_key_values[i] if past_key_values is not None else None
565
+
566
+ layer_outputs = layer_module(
567
+ hidden_states=hidden_states,
568
+ attention_mask=attention_mask,
569
+ head_mask=head_mask[i],
570
+ encoder_hidden_states=encoder_hidden_states,
571
+ encoder_attention_mask=encoder_attention_mask,
572
+ past_key_value=past_key_value,
573
+ output_attentions=output_attentions,
574
+ training=training,
575
+ )
576
+ hidden_states = layer_outputs[0]
577
+
578
+ if use_cache:
579
+ next_decoder_cache += (layer_outputs[-1],)
580
+
581
+ if output_attentions:
582
+ all_attentions = all_attentions + (layer_outputs[1],)
583
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
584
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
585
+
586
+ # Add last layer
587
+ if output_hidden_states:
588
+ all_hidden_states = all_hidden_states + (hidden_states,)
589
+
590
+ if not return_dict:
591
+ return tuple(
592
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
593
+ )
594
+
595
+ return TFBaseModelOutputWithPastAndCrossAttentions(
596
+ last_hidden_state=hidden_states,
597
+ past_key_values=next_decoder_cache,
598
+ hidden_states=all_hidden_states,
599
+ attentions=all_attentions,
600
+ cross_attentions=all_cross_attentions,
601
+ )
602
+
603
+ def build(self, input_shape=None):
604
+ if self.built:
605
+ return
606
+ self.built = True
607
+ if getattr(self, "embedding_hidden_mapping_in", None) is not None:
608
+ with tf.name_scope(self.embedding_hidden_mapping_in.name):
609
+ self.embedding_hidden_mapping_in.build([None, None, self.config.input_embedding_size])
610
+ if getattr(self, "layer", None) is not None:
611
+ for layer in self.layer:
612
+ with tf.name_scope(layer.name):
613
+ layer.build(None)
614
+
615
+
616
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->RemBert
617
+ class TFRemBertPooler(keras.layers.Layer):
618
+ def __init__(self, config: RemBertConfig, **kwargs):
619
+ super().__init__(**kwargs)
620
+
621
+ self.dense = keras.layers.Dense(
622
+ units=config.hidden_size,
623
+ kernel_initializer=get_initializer(config.initializer_range),
624
+ activation="tanh",
625
+ name="dense",
626
+ )
627
+ self.config = config
628
+
629
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
630
+ # We "pool" the model by simply taking the hidden state corresponding
631
+ # to the first token.
632
+ first_token_tensor = hidden_states[:, 0]
633
+ pooled_output = self.dense(inputs=first_token_tensor)
634
+
635
+ return pooled_output
636
+
637
+ def build(self, input_shape=None):
638
+ if self.built:
639
+ return
640
+ self.built = True
641
+ if getattr(self, "dense", None) is not None:
642
+ with tf.name_scope(self.dense.name):
643
+ self.dense.build([None, None, self.config.hidden_size])
644
+
645
+
646
+ class TFRemBertLMPredictionHead(keras.layers.Layer):
647
+ def __init__(self, config: RemBertConfig, input_embeddings: keras.layers.Layer, **kwargs):
648
+ super().__init__(**kwargs)
649
+
650
+ self.config = config
651
+ self.initializer_range = config.initializer_range
652
+ self.output_embedding_size = config.output_embedding_size
653
+ self.dense = keras.layers.Dense(
654
+ config.output_embedding_size, kernel_initializer=get_initializer(self.initializer_range), name="dense"
655
+ )
656
+ if isinstance(config.hidden_act, str):
657
+ self.activation = get_tf_activation(config.hidden_act)
658
+ else:
659
+ self.activation = config.hidden_act
660
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
661
+
662
+ def build(self, input_shape=None):
663
+ self.decoder = self.add_weight(
664
+ name="decoder/weight",
665
+ shape=[self.config.vocab_size, self.output_embedding_size],
666
+ initializer=get_initializer(self.initializer_range),
667
+ )
668
+ self.decoder_bias = self.add_weight(
669
+ shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
670
+ )
671
+
672
+ if self.built:
673
+ return
674
+ self.built = True
675
+ if getattr(self, "dense", None) is not None:
676
+ with tf.name_scope(self.dense.name):
677
+ self.dense.build([None, None, self.config.hidden_size])
678
+ if getattr(self, "LayerNorm", None) is not None:
679
+ with tf.name_scope(self.LayerNorm.name):
680
+ self.LayerNorm.build([None, self.config.output_embedding_size])
681
+
682
+ def get_output_embeddings(self) -> keras.layers.Layer:
683
+ return self
684
+
685
+ def set_output_embeddings(self, value):
686
+ self.decoder = value
687
+ self.decoder.vocab_size = shape_list(value)[0]
688
+
689
+ def get_bias(self) -> Dict[str, tf.Variable]:
690
+ return {"decoder_bias": self.decoder_bias}
691
+
692
+ def set_bias(self, value: tf.Variable):
693
+ self.decoder_bias = value["decoder_bias"]
694
+ self.config.vocab_size = shape_list(value["decoder_bias"])[0]
695
+
696
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
697
+ hidden_states = self.dense(inputs=hidden_states)
698
+ hidden_states = self.activation(hidden_states)
699
+ seq_length = shape_list(tensor=hidden_states)[1]
700
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.output_embedding_size])
701
+ hidden_states = self.LayerNorm(hidden_states)
702
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder, transpose_b=True)
703
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
704
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias)
705
+ return hidden_states
706
+
707
+
708
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->RemBert
709
+ class TFRemBertMLMHead(keras.layers.Layer):
710
+ def __init__(self, config: RemBertConfig, input_embeddings: keras.layers.Layer, **kwargs):
711
+ super().__init__(**kwargs)
712
+
713
+ self.predictions = TFRemBertLMPredictionHead(config, input_embeddings, name="predictions")
714
+
715
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
716
+ prediction_scores = self.predictions(hidden_states=sequence_output)
717
+
718
+ return prediction_scores
719
+
720
+ def build(self, input_shape=None):
721
+ if self.built:
722
+ return
723
+ self.built = True
724
+ if getattr(self, "predictions", None) is not None:
725
+ with tf.name_scope(self.predictions.name):
726
+ self.predictions.build(None)
727
+
728
+
729
+ @keras_serializable
730
+ class TFRemBertMainLayer(keras.layers.Layer):
731
+ config_class = RemBertConfig
732
+
733
+ def __init__(self, config: RemBertConfig, add_pooling_layer: bool = True, **kwargs):
734
+ super().__init__(**kwargs)
735
+
736
+ self.config = config
737
+ self.is_decoder = config.is_decoder
738
+
739
+ self.embeddings = TFRemBertEmbeddings(config, name="embeddings")
740
+ self.encoder = TFRemBertEncoder(config, name="encoder")
741
+ self.pooler = TFRemBertPooler(config, name="pooler") if add_pooling_layer else None
742
+
743
+ def get_input_embeddings(self) -> keras.layers.Layer:
744
+ return self.embeddings
745
+
746
+ def set_input_embeddings(self, value: tf.Variable):
747
+ self.embeddings.weight = value
748
+ self.embeddings.vocab_size = shape_list(value)[0]
749
+
750
+ def _prune_heads(self, heads_to_prune):
751
+ """
752
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
753
+ class PreTrainedModel
754
+ """
755
+ raise NotImplementedError
756
+
757
+ @unpack_inputs
758
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call
759
+ def call(
760
+ self,
761
+ input_ids: TFModelInputType | None = None,
762
+ attention_mask: np.ndarray | tf.Tensor | None = None,
763
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
764
+ position_ids: np.ndarray | tf.Tensor | None = None,
765
+ head_mask: np.ndarray | tf.Tensor | None = None,
766
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
767
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
768
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
769
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
770
+ use_cache: Optional[bool] = None,
771
+ output_attentions: Optional[bool] = None,
772
+ output_hidden_states: Optional[bool] = None,
773
+ return_dict: Optional[bool] = None,
774
+ training: bool = False,
775
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
776
+ if not self.config.is_decoder:
777
+ use_cache = False
778
+
779
+ if input_ids is not None and inputs_embeds is not None:
780
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
781
+ elif input_ids is not None:
782
+ input_shape = shape_list(input_ids)
783
+ elif inputs_embeds is not None:
784
+ input_shape = shape_list(inputs_embeds)[:-1]
785
+ else:
786
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
787
+
788
+ batch_size, seq_length = input_shape
789
+
790
+ if past_key_values is None:
791
+ past_key_values_length = 0
792
+ past_key_values = [None] * len(self.encoder.layer)
793
+ else:
794
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
795
+
796
+ if attention_mask is None:
797
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
798
+
799
+ if token_type_ids is None:
800
+ token_type_ids = tf.fill(dims=input_shape, value=0)
801
+
802
+ embedding_output = self.embeddings(
803
+ input_ids=input_ids,
804
+ position_ids=position_ids,
805
+ token_type_ids=token_type_ids,
806
+ inputs_embeds=inputs_embeds,
807
+ past_key_values_length=past_key_values_length,
808
+ training=training,
809
+ )
810
+
811
+ # We create a 3D attention mask from a 2D tensor mask.
812
+ # Sizes are [batch_size, 1, 1, to_seq_length]
813
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
814
+ # this attention mask is more simple than the triangular masking of causal attention
815
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
816
+ attention_mask_shape = shape_list(attention_mask)
817
+
818
+ mask_seq_length = seq_length + past_key_values_length
819
+ # Copied from `modeling_tf_t5.py`
820
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
821
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
822
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
823
+ if self.is_decoder:
824
+ seq_ids = tf.range(mask_seq_length)
825
+ causal_mask = tf.less_equal(
826
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
827
+ seq_ids[None, :, None],
828
+ )
829
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
830
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
831
+ attention_mask_shape = shape_list(extended_attention_mask)
832
+ extended_attention_mask = tf.reshape(
833
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
834
+ )
835
+ if past_key_values[0] is not None:
836
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]
837
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
838
+ else:
839
+ extended_attention_mask = tf.reshape(
840
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
841
+ )
842
+
843
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
844
+ # masked positions, this operation will create a tensor which is 0.0 for
845
+ # positions we want to attend and -10000.0 for masked positions.
846
+ # Since we are adding it to the raw scores before the softmax, this is
847
+ # effectively the same as removing these entirely.
848
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
849
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
850
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
851
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
852
+
853
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
854
+ if self.is_decoder and encoder_attention_mask is not None:
855
+ # If a 2D or 3D attention mask is provided for the cross-attention,
856
+ # we need to make it broadcastable to
857
+ # [batch_size, num_heads, mask_seq_length, mask_seq_length]
858
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
859
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
860
+ if num_dims_encoder_attention_mask == 3:
861
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
862
+ if num_dims_encoder_attention_mask == 2:
863
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
864
+
865
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
866
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
867
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
868
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
869
+
870
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
871
+ else:
872
+ encoder_extended_attention_mask = None
873
+
874
+ # Prepare head mask if needed
875
+ # 1.0 in head_mask indicate we keep the head
876
+ # attention_probs has shape bsz x n_heads x N x N
877
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
878
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
879
+ if head_mask is not None:
880
+ raise NotImplementedError
881
+ else:
882
+ head_mask = [None] * self.config.num_hidden_layers
883
+
884
+ encoder_outputs = self.encoder(
885
+ hidden_states=embedding_output,
886
+ attention_mask=extended_attention_mask,
887
+ head_mask=head_mask,
888
+ encoder_hidden_states=encoder_hidden_states,
889
+ encoder_attention_mask=encoder_extended_attention_mask,
890
+ past_key_values=past_key_values,
891
+ use_cache=use_cache,
892
+ output_attentions=output_attentions,
893
+ output_hidden_states=output_hidden_states,
894
+ return_dict=return_dict,
895
+ training=training,
896
+ )
897
+
898
+ sequence_output = encoder_outputs[0]
899
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
900
+
901
+ if not return_dict:
902
+ return (
903
+ sequence_output,
904
+ pooled_output,
905
+ ) + encoder_outputs[1:]
906
+
907
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
908
+ last_hidden_state=sequence_output,
909
+ pooler_output=pooled_output,
910
+ past_key_values=encoder_outputs.past_key_values,
911
+ hidden_states=encoder_outputs.hidden_states,
912
+ attentions=encoder_outputs.attentions,
913
+ cross_attentions=encoder_outputs.cross_attentions,
914
+ )
915
+
916
+ def build(self, input_shape=None):
917
+ if self.built:
918
+ return
919
+ self.built = True
920
+ if getattr(self, "embeddings", None) is not None:
921
+ with tf.name_scope(self.embeddings.name):
922
+ self.embeddings.build(None)
923
+ if getattr(self, "encoder", None) is not None:
924
+ with tf.name_scope(self.encoder.name):
925
+ self.encoder.build(None)
926
+ if getattr(self, "pooler", None) is not None:
927
+ with tf.name_scope(self.pooler.name):
928
+ self.pooler.build(None)
929
+
930
+
931
+ class TFRemBertPreTrainedModel(TFPreTrainedModel):
932
+ """
933
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
934
+ models.
935
+ """
936
+
937
+ config_class = RemBertConfig
938
+ base_model_prefix = "rembert"
939
+
940
+
941
+ REMBERT_START_DOCSTRING = r"""
942
+
943
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
944
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
945
+ etc.)
946
+
947
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
948
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
949
+ behavior.
950
+
951
+ <Tip>
952
+
953
+ TensorFlow models and layers in `transformers` accept two formats as input:
954
+
955
+ - having all inputs as keyword arguments (like PyTorch models), or
956
+ - having all inputs as a list, tuple or dict in the first positional argument.
957
+
958
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
959
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
960
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
961
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
962
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
963
+ positional argument:
964
+
965
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
966
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
967
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
968
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
969
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
970
+
971
+ Note that when creating models and layers with
972
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
973
+ about any of this, as you can just pass inputs like you would to any other Python function!
974
+
975
+ </Tip>
976
+
977
+ Args:
978
+ config ([`RemBertConfig`]): Model configuration class with all the parameters of the model.
979
+ Initializing with a config file does not load the weights associated with the model, only the
980
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
981
+ """
982
+
983
+ REMBERT_INPUTS_DOCSTRING = r"""
984
+ Args:
985
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
986
+ Indices of input sequence tokens in the vocabulary.
987
+
988
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
989
+ [`PreTrainedTokenizer.encode`] for details.
990
+
991
+ [What are input IDs?](../glossary#input-ids)
992
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
993
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
994
+
995
+ - 1 for tokens that are **not masked**,
996
+ - 0 for tokens that are **masked**.
997
+
998
+ [What are attention masks?](../glossary#attention-mask)
999
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1000
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1001
+ 1]`:
1002
+
1003
+ - 0 corresponds to a *sentence A* token,
1004
+ - 1 corresponds to a *sentence B* token.
1005
+
1006
+ [What are token type IDs?](../glossary#token-type-ids)
1007
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1008
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1009
+ config.max_position_embeddings - 1]`.
1010
+
1011
+ [What are position IDs?](../glossary#position-ids)
1012
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1013
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1014
+
1015
+ - 1 indicates the head is **not masked**,
1016
+ - 0 indicates the head is **masked**.
1017
+
1018
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1019
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1020
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1021
+ model's internal embedding lookup matrix.
1022
+ output_attentions (`bool`, *optional*):
1023
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1024
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1025
+ config will be used instead.
1026
+ output_hidden_states (`bool`, *optional*):
1027
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1028
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1029
+ used instead.
1030
+ return_dict (`bool`, *optional*):
1031
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1032
+ eager mode, in graph mode the value will always be set to True.
1033
+ training (`bool`, *optional*, defaults to `False`):
1034
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1035
+ behaviors between training and evaluation).
1036
+ """
1037
+
1038
+
1039
+ @add_start_docstrings(
1040
+ "The bare RemBERT Model transformer outputing raw hidden-states without any specific head on top.",
1041
+ REMBERT_START_DOCSTRING,
1042
+ )
1043
+ class TFRemBertModel(TFRemBertPreTrainedModel):
1044
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1045
+ super().__init__(config, *inputs, **kwargs)
1046
+
1047
+ self.rembert = TFRemBertMainLayer(config, name="rembert")
1048
+
1049
+ @unpack_inputs
1050
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1051
+ @add_code_sample_docstrings(
1052
+ checkpoint="google/rembert",
1053
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
1054
+ config_class=_CONFIG_FOR_DOC,
1055
+ )
1056
+ def call(
1057
+ self,
1058
+ input_ids: TFModelInputType | None = None,
1059
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1060
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1061
+ position_ids: np.ndarray | tf.Tensor | None = None,
1062
+ head_mask: np.ndarray | tf.Tensor | None = None,
1063
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1064
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1065
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1066
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1067
+ use_cache: Optional[bool] = None,
1068
+ output_attentions: Optional[bool] = None,
1069
+ output_hidden_states: Optional[bool] = None,
1070
+ return_dict: Optional[bool] = None,
1071
+ training: Optional[bool] = False,
1072
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
1073
+ r"""
1074
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1075
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1076
+ the model is configured as a decoder.
1077
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1078
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1079
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1080
+
1081
+ - 1 for tokens that are **not masked**,
1082
+ - 0 for tokens that are **masked**.
1083
+
1084
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)
1085
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1086
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1087
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1088
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1089
+ use_cache (`bool`, *optional*, defaults to `True`):
1090
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1091
+ `past_key_values`). Set to `False` during training, `True` during generation
1092
+ """
1093
+ outputs = self.rembert(
1094
+ input_ids=input_ids,
1095
+ attention_mask=attention_mask,
1096
+ token_type_ids=token_type_ids,
1097
+ position_ids=position_ids,
1098
+ head_mask=head_mask,
1099
+ inputs_embeds=inputs_embeds,
1100
+ encoder_hidden_states=encoder_hidden_states,
1101
+ encoder_attention_mask=encoder_attention_mask,
1102
+ past_key_values=past_key_values,
1103
+ use_cache=use_cache,
1104
+ output_attentions=output_attentions,
1105
+ output_hidden_states=output_hidden_states,
1106
+ return_dict=return_dict,
1107
+ training=training,
1108
+ )
1109
+
1110
+ return outputs
1111
+
1112
+ def build(self, input_shape=None):
1113
+ if self.built:
1114
+ return
1115
+ self.built = True
1116
+ if getattr(self, "rembert", None) is not None:
1117
+ with tf.name_scope(self.rembert.name):
1118
+ self.rembert.build(None)
1119
+
1120
+
1121
+ @add_start_docstrings("""RemBERT Model with a `language modeling` head on top.""", REMBERT_START_DOCSTRING)
1122
+ class TFRemBertForMaskedLM(TFRemBertPreTrainedModel, TFMaskedLanguageModelingLoss):
1123
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1124
+ super().__init__(config, *inputs, **kwargs)
1125
+
1126
+ if config.is_decoder:
1127
+ logger.warning(
1128
+ "If you want to use `TFRemBertForMaskedLM` make sure `config.is_decoder=False` for "
1129
+ "bi-directional self-attention."
1130
+ )
1131
+
1132
+ self.rembert = TFRemBertMainLayer(config, name="rembert", add_pooling_layer=False)
1133
+ self.mlm = TFRemBertMLMHead(config, input_embeddings=self.rembert.embeddings, name="mlm___cls")
1134
+
1135
+ def get_lm_head(self) -> keras.layers.Layer:
1136
+ return self.mlm.predictions
1137
+
1138
+ @unpack_inputs
1139
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1140
+ @add_code_sample_docstrings(
1141
+ checkpoint="google/rembert",
1142
+ output_type=TFMaskedLMOutput,
1143
+ config_class=_CONFIG_FOR_DOC,
1144
+ )
1145
+ def call(
1146
+ self,
1147
+ input_ids: TFModelInputType | None = None,
1148
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1149
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1150
+ position_ids: np.ndarray | tf.Tensor | None = None,
1151
+ head_mask: np.ndarray | tf.Tensor | None = None,
1152
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1153
+ output_attentions: Optional[bool] = None,
1154
+ output_hidden_states: Optional[bool] = None,
1155
+ return_dict: Optional[bool] = None,
1156
+ labels: np.ndarray | tf.Tensor | None = None,
1157
+ training: Optional[bool] = False,
1158
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1159
+ r"""
1160
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1161
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1162
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1163
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1164
+ """
1165
+ outputs = self.rembert(
1166
+ input_ids=input_ids,
1167
+ attention_mask=attention_mask,
1168
+ token_type_ids=token_type_ids,
1169
+ position_ids=position_ids,
1170
+ head_mask=head_mask,
1171
+ inputs_embeds=inputs_embeds,
1172
+ output_attentions=output_attentions,
1173
+ output_hidden_states=output_hidden_states,
1174
+ return_dict=return_dict,
1175
+ training=training,
1176
+ )
1177
+ sequence_output = outputs[0]
1178
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1179
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1180
+
1181
+ if not return_dict:
1182
+ output = (prediction_scores,) + outputs[2:]
1183
+ return ((loss,) + output) if loss is not None else output
1184
+
1185
+ return TFMaskedLMOutput(
1186
+ loss=loss,
1187
+ logits=prediction_scores,
1188
+ hidden_states=outputs.hidden_states,
1189
+ attentions=outputs.attentions,
1190
+ )
1191
+
1192
+ def build(self, input_shape=None):
1193
+ if self.built:
1194
+ return
1195
+ self.built = True
1196
+ if getattr(self, "rembert", None) is not None:
1197
+ with tf.name_scope(self.rembert.name):
1198
+ self.rembert.build(None)
1199
+ if getattr(self, "mlm", None) is not None:
1200
+ with tf.name_scope(self.mlm.name):
1201
+ self.mlm.build(None)
1202
+
1203
+
1204
+ @add_start_docstrings(
1205
+ """RemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", REMBERT_START_DOCSTRING
1206
+ )
1207
+ class TFRemBertForCausalLM(TFRemBertPreTrainedModel, TFCausalLanguageModelingLoss):
1208
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1209
+ super().__init__(config, *inputs, **kwargs)
1210
+
1211
+ if not config.is_decoder:
1212
+ logger.warning("If you want to use `TFRemBertForCausalLM` as a standalone, add `is_decoder=True.`")
1213
+
1214
+ self.rembert = TFRemBertMainLayer(config, name="rembert", add_pooling_layer=False)
1215
+ self.mlm = TFRemBertMLMHead(config, input_embeddings=self.rembert.embeddings, name="mlm___cls")
1216
+
1217
+ def get_lm_head(self) -> keras.layers.Layer:
1218
+ return self.mlm.predictions
1219
+
1220
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation
1221
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1222
+ input_shape = input_ids.shape
1223
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1224
+ if attention_mask is None:
1225
+ attention_mask = tf.ones(input_shape)
1226
+
1227
+ # cut decoder_input_ids if past is used
1228
+ if past_key_values is not None:
1229
+ input_ids = input_ids[:, -1:]
1230
+
1231
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1232
+
1233
+ @unpack_inputs
1234
+ @add_code_sample_docstrings(
1235
+ checkpoint="google/rembert",
1236
+ output_type=TFCausalLMOutputWithCrossAttentions,
1237
+ config_class=_CONFIG_FOR_DOC,
1238
+ )
1239
+ def call(
1240
+ self,
1241
+ input_ids: TFModelInputType | None = None,
1242
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1243
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1244
+ position_ids: np.ndarray | tf.Tensor | None = None,
1245
+ head_mask: np.ndarray | tf.Tensor | None = None,
1246
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1247
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1248
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1249
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1250
+ use_cache: Optional[bool] = None,
1251
+ output_attentions: Optional[bool] = None,
1252
+ output_hidden_states: Optional[bool] = None,
1253
+ return_dict: Optional[bool] = None,
1254
+ labels: np.ndarray | tf.Tensor | None = None,
1255
+ training: Optional[bool] = False,
1256
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
1257
+ r"""
1258
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1259
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1260
+ the model is configured as a decoder.
1261
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1262
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1263
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1264
+
1265
+ - 1 for tokens that are **not masked**,
1266
+ - 0 for tokens that are **masked**.
1267
+
1268
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1269
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1270
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1271
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1272
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1273
+ use_cache (`bool`, *optional*, defaults to `True`):
1274
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1275
+ `past_key_values`). Set to `False` during training, `True` during generation
1276
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1277
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1278
+ config.vocab_size - 1]`.
1279
+ """
1280
+ outputs = self.rembert(
1281
+ input_ids=input_ids,
1282
+ attention_mask=attention_mask,
1283
+ token_type_ids=token_type_ids,
1284
+ position_ids=position_ids,
1285
+ head_mask=head_mask,
1286
+ inputs_embeds=inputs_embeds,
1287
+ encoder_hidden_states=encoder_hidden_states,
1288
+ encoder_attention_mask=encoder_attention_mask,
1289
+ past_key_values=past_key_values,
1290
+ use_cache=use_cache,
1291
+ output_attentions=output_attentions,
1292
+ output_hidden_states=output_hidden_states,
1293
+ return_dict=return_dict,
1294
+ training=training,
1295
+ )
1296
+ sequence_output = outputs[0]
1297
+ logits = self.mlm(sequence_output=sequence_output, training=training)
1298
+ loss = None
1299
+
1300
+ if labels is not None:
1301
+ # shift labels to the left and cut last logit token
1302
+ shifted_logits = logits[:, :-1]
1303
+ labels = labels[:, 1:]
1304
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
1305
+
1306
+ if not return_dict:
1307
+ output = (logits,) + outputs[2:]
1308
+ return ((loss,) + output) if loss is not None else output
1309
+
1310
+ return TFCausalLMOutputWithCrossAttentions(
1311
+ loss=loss,
1312
+ logits=logits,
1313
+ past_key_values=outputs.past_key_values,
1314
+ hidden_states=outputs.hidden_states,
1315
+ attentions=outputs.attentions,
1316
+ cross_attentions=outputs.cross_attentions,
1317
+ )
1318
+
1319
+ def build(self, input_shape=None):
1320
+ if self.built:
1321
+ return
1322
+ self.built = True
1323
+ if getattr(self, "rembert", None) is not None:
1324
+ with tf.name_scope(self.rembert.name):
1325
+ self.rembert.build(None)
1326
+ if getattr(self, "mlm", None) is not None:
1327
+ with tf.name_scope(self.mlm.name):
1328
+ self.mlm.build(None)
1329
+
1330
+
1331
+ @add_start_docstrings(
1332
+ """
1333
+ RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
1334
+ """,
1335
+ REMBERT_START_DOCSTRING,
1336
+ )
1337
+ class TFRemBertForSequenceClassification(TFRemBertPreTrainedModel, TFSequenceClassificationLoss):
1338
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1339
+ super().__init__(config, *inputs, **kwargs)
1340
+
1341
+ self.num_labels = config.num_labels
1342
+
1343
+ self.rembert = TFRemBertMainLayer(config, name="rembert")
1344
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
1345
+ self.classifier = keras.layers.Dense(
1346
+ units=config.num_labels,
1347
+ kernel_initializer=get_initializer(config.initializer_range),
1348
+ name="classifier",
1349
+ )
1350
+ self.config = config
1351
+
1352
+ @unpack_inputs
1353
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1354
+ @add_code_sample_docstrings(
1355
+ checkpoint="google/rembert",
1356
+ output_type=TFSequenceClassifierOutput,
1357
+ config_class=_CONFIG_FOR_DOC,
1358
+ )
1359
+ def call(
1360
+ self,
1361
+ input_ids: TFModelInputType | None = None,
1362
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1363
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1364
+ position_ids: np.ndarray | tf.Tensor | None = None,
1365
+ head_mask: np.ndarray | tf.Tensor | None = None,
1366
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1367
+ output_attentions: Optional[bool] = None,
1368
+ output_hidden_states: Optional[bool] = None,
1369
+ return_dict: Optional[bool] = None,
1370
+ labels: np.ndarray | tf.Tensor | None = None,
1371
+ training: Optional[bool] = False,
1372
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1373
+ r"""
1374
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1375
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1376
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1377
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1378
+ """
1379
+ outputs = self.rembert(
1380
+ input_ids=input_ids,
1381
+ attention_mask=attention_mask,
1382
+ token_type_ids=token_type_ids,
1383
+ position_ids=position_ids,
1384
+ head_mask=head_mask,
1385
+ inputs_embeds=inputs_embeds,
1386
+ output_attentions=output_attentions,
1387
+ output_hidden_states=output_hidden_states,
1388
+ return_dict=return_dict,
1389
+ training=training,
1390
+ )
1391
+ pooled_output = outputs[1]
1392
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1393
+ logits = self.classifier(inputs=pooled_output)
1394
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1395
+
1396
+ if not return_dict:
1397
+ output = (logits,) + outputs[2:]
1398
+ return ((loss,) + output) if loss is not None else output
1399
+
1400
+ return TFSequenceClassifierOutput(
1401
+ loss=loss,
1402
+ logits=logits,
1403
+ hidden_states=outputs.hidden_states,
1404
+ attentions=outputs.attentions,
1405
+ )
1406
+
1407
+ def build(self, input_shape=None):
1408
+ if self.built:
1409
+ return
1410
+ self.built = True
1411
+ if getattr(self, "rembert", None) is not None:
1412
+ with tf.name_scope(self.rembert.name):
1413
+ self.rembert.build(None)
1414
+ if getattr(self, "classifier", None) is not None:
1415
+ with tf.name_scope(self.classifier.name):
1416
+ self.classifier.build([None, None, self.config.hidden_size])
1417
+
1418
+
1419
+ @add_start_docstrings(
1420
+ """
1421
+ RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1422
+ softmax) e.g. for RocStories/SWAG tasks.
1423
+ """,
1424
+ REMBERT_START_DOCSTRING,
1425
+ )
1426
+ class TFRemBertForMultipleChoice(TFRemBertPreTrainedModel, TFMultipleChoiceLoss):
1427
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1428
+ super().__init__(config, *inputs, **kwargs)
1429
+
1430
+ self.rembert = TFRemBertMainLayer(config, name="rembert")
1431
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
1432
+ self.classifier = keras.layers.Dense(
1433
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1434
+ )
1435
+ self.config = config
1436
+
1437
+ @unpack_inputs
1438
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1439
+ @add_code_sample_docstrings(
1440
+ checkpoint="google/rembert",
1441
+ output_type=TFMultipleChoiceModelOutput,
1442
+ config_class=_CONFIG_FOR_DOC,
1443
+ )
1444
+ def call(
1445
+ self,
1446
+ input_ids: TFModelInputType | None = None,
1447
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1448
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1449
+ position_ids: np.ndarray | tf.Tensor | None = None,
1450
+ head_mask: np.ndarray | tf.Tensor | None = None,
1451
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1452
+ output_attentions: Optional[bool] = None,
1453
+ output_hidden_states: Optional[bool] = None,
1454
+ return_dict: Optional[bool] = None,
1455
+ labels: np.ndarray | tf.Tensor | None = None,
1456
+ training: Optional[bool] = False,
1457
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1458
+ r"""
1459
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1460
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1461
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1462
+ """
1463
+
1464
+ if input_ids is not None:
1465
+ num_choices = shape_list(input_ids)[1]
1466
+ seq_length = shape_list(input_ids)[2]
1467
+ else:
1468
+ num_choices = shape_list(inputs_embeds)[1]
1469
+ seq_length = shape_list(inputs_embeds)[2]
1470
+
1471
+ flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
1472
+ flat_attention_mask = (
1473
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
1474
+ )
1475
+ flat_token_type_ids = (
1476
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
1477
+ )
1478
+ flat_position_ids = (
1479
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
1480
+ )
1481
+ flat_inputs_embeds = (
1482
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
1483
+ if inputs_embeds is not None
1484
+ else None
1485
+ )
1486
+ outputs = self.rembert(
1487
+ input_ids=flat_input_ids,
1488
+ attention_mask=flat_attention_mask,
1489
+ token_type_ids=flat_token_type_ids,
1490
+ position_ids=flat_position_ids,
1491
+ head_mask=head_mask,
1492
+ inputs_embeds=flat_inputs_embeds,
1493
+ output_attentions=output_attentions,
1494
+ output_hidden_states=output_hidden_states,
1495
+ return_dict=return_dict,
1496
+ training=training,
1497
+ )
1498
+ pooled_output = outputs[1]
1499
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1500
+ logits = self.classifier(inputs=pooled_output)
1501
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
1502
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
1503
+
1504
+ if not return_dict:
1505
+ output = (reshaped_logits,) + outputs[2:]
1506
+ return ((loss,) + output) if loss is not None else output
1507
+
1508
+ return TFMultipleChoiceModelOutput(
1509
+ loss=loss,
1510
+ logits=reshaped_logits,
1511
+ hidden_states=outputs.hidden_states,
1512
+ attentions=outputs.attentions,
1513
+ )
1514
+
1515
+ def build(self, input_shape=None):
1516
+ if self.built:
1517
+ return
1518
+ self.built = True
1519
+ if getattr(self, "rembert", None) is not None:
1520
+ with tf.name_scope(self.rembert.name):
1521
+ self.rembert.build(None)
1522
+ if getattr(self, "classifier", None) is not None:
1523
+ with tf.name_scope(self.classifier.name):
1524
+ self.classifier.build([None, None, self.config.hidden_size])
1525
+
1526
+
1527
+ @add_start_docstrings(
1528
+ """
1529
+ RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1530
+ Named-Entity-Recognition (NER) tasks.
1531
+ """,
1532
+ REMBERT_START_DOCSTRING,
1533
+ )
1534
+ class TFRemBertForTokenClassification(TFRemBertPreTrainedModel, TFTokenClassificationLoss):
1535
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1536
+ super().__init__(config, *inputs, **kwargs)
1537
+
1538
+ self.num_labels = config.num_labels
1539
+
1540
+ self.rembert = TFRemBertMainLayer(config, name="rembert", add_pooling_layer=False)
1541
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1542
+ self.classifier = keras.layers.Dense(
1543
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1544
+ )
1545
+ self.config = config
1546
+
1547
+ @unpack_inputs
1548
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1549
+ @add_code_sample_docstrings(
1550
+ checkpoint="google/rembert",
1551
+ output_type=TFTokenClassifierOutput,
1552
+ config_class=_CONFIG_FOR_DOC,
1553
+ )
1554
+ def call(
1555
+ self,
1556
+ input_ids: TFModelInputType | None = None,
1557
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1558
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1559
+ position_ids: np.ndarray | tf.Tensor | None = None,
1560
+ head_mask: np.ndarray | tf.Tensor | None = None,
1561
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1562
+ output_attentions: Optional[bool] = None,
1563
+ output_hidden_states: Optional[bool] = None,
1564
+ return_dict: Optional[bool] = None,
1565
+ labels: np.ndarray | tf.Tensor | None = None,
1566
+ training: Optional[bool] = False,
1567
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1568
+ r"""
1569
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1570
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1571
+ """
1572
+ outputs = self.rembert(
1573
+ input_ids=input_ids,
1574
+ attention_mask=attention_mask,
1575
+ token_type_ids=token_type_ids,
1576
+ position_ids=position_ids,
1577
+ head_mask=head_mask,
1578
+ inputs_embeds=inputs_embeds,
1579
+ output_attentions=output_attentions,
1580
+ output_hidden_states=output_hidden_states,
1581
+ return_dict=return_dict,
1582
+ training=training,
1583
+ )
1584
+ sequence_output = outputs[0]
1585
+ sequence_output = self.dropout(inputs=sequence_output, training=training)
1586
+ logits = self.classifier(inputs=sequence_output)
1587
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1588
+
1589
+ if not return_dict:
1590
+ output = (logits,) + outputs[1:]
1591
+ return ((loss,) + output) if loss is not None else output
1592
+
1593
+ return TFTokenClassifierOutput(
1594
+ loss=loss,
1595
+ logits=logits,
1596
+ hidden_states=outputs.hidden_states,
1597
+ attentions=outputs.attentions,
1598
+ )
1599
+
1600
+ def build(self, input_shape=None):
1601
+ if self.built:
1602
+ return
1603
+ self.built = True
1604
+ if getattr(self, "rembert", None) is not None:
1605
+ with tf.name_scope(self.rembert.name):
1606
+ self.rembert.build(None)
1607
+ if getattr(self, "classifier", None) is not None:
1608
+ with tf.name_scope(self.classifier.name):
1609
+ self.classifier.build([None, None, self.config.hidden_size])
1610
+
1611
+
1612
+ @add_start_docstrings(
1613
+ """
1614
+ RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1615
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1616
+ """,
1617
+ REMBERT_START_DOCSTRING,
1618
+ )
1619
+ class TFRemBertForQuestionAnswering(TFRemBertPreTrainedModel, TFQuestionAnsweringLoss):
1620
+ def __init__(self, config: RemBertConfig, *inputs, **kwargs):
1621
+ super().__init__(config, *inputs, **kwargs)
1622
+
1623
+ self.num_labels = config.num_labels
1624
+
1625
+ self.rembert = TFRemBertMainLayer(config, add_pooling_layer=False, name="rembert")
1626
+ self.qa_outputs = keras.layers.Dense(
1627
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1628
+ )
1629
+ self.config = config
1630
+
1631
+ @unpack_inputs
1632
+ @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1633
+ @add_code_sample_docstrings(
1634
+ checkpoint="google/rembert",
1635
+ output_type=TFQuestionAnsweringModelOutput,
1636
+ config_class=_CONFIG_FOR_DOC,
1637
+ )
1638
+ def call(
1639
+ self,
1640
+ input_ids: TFModelInputType | None = None,
1641
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1642
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1643
+ position_ids: np.ndarray | tf.Tensor | None = None,
1644
+ head_mask: np.ndarray | tf.Tensor | None = None,
1645
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1646
+ output_attentions: Optional[bool] = None,
1647
+ output_hidden_states: Optional[bool] = None,
1648
+ return_dict: Optional[bool] = None,
1649
+ start_positions: np.ndarray | tf.Tensor | None = None,
1650
+ end_positions: np.ndarray | tf.Tensor | None = None,
1651
+ training: Optional[bool] = False,
1652
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1653
+ r"""
1654
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1655
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1656
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1657
+ are not taken into account for computing the loss.
1658
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1659
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1660
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1661
+ are not taken into account for computing the loss.
1662
+ """
1663
+ outputs = self.rembert(
1664
+ input_ids=input_ids,
1665
+ attention_mask=attention_mask,
1666
+ token_type_ids=token_type_ids,
1667
+ position_ids=position_ids,
1668
+ head_mask=head_mask,
1669
+ inputs_embeds=inputs_embeds,
1670
+ output_attentions=output_attentions,
1671
+ output_hidden_states=output_hidden_states,
1672
+ return_dict=return_dict,
1673
+ training=training,
1674
+ )
1675
+ sequence_output = outputs[0]
1676
+ logits = self.qa_outputs(inputs=sequence_output)
1677
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1678
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1679
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1680
+ loss = None
1681
+
1682
+ if start_positions is not None and end_positions is not None:
1683
+ labels = {"start_position": start_positions}
1684
+ labels["end_position"] = end_positions
1685
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
1686
+
1687
+ if not return_dict:
1688
+ output = (start_logits, end_logits) + outputs[2:]
1689
+ return ((loss,) + output) if loss is not None else output
1690
+
1691
+ return TFQuestionAnsweringModelOutput(
1692
+ loss=loss,
1693
+ start_logits=start_logits,
1694
+ end_logits=end_logits,
1695
+ hidden_states=outputs.hidden_states,
1696
+ attentions=outputs.attentions,
1697
+ )
1698
+
1699
+ def build(self, input_shape=None):
1700
+ if self.built:
1701
+ return
1702
+ self.built = True
1703
+ if getattr(self, "rembert", None) is not None:
1704
+ with tf.name_scope(self.rembert.name):
1705
+ self.rembert.build(None)
1706
+ if getattr(self, "qa_outputs", None) is not None:
1707
+ with tf.name_scope(self.qa_outputs.name):
1708
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1709
+
1710
+
1711
+ __all__ = [
1712
+ "TFRemBertForCausalLM",
1713
+ "TFRemBertForMaskedLM",
1714
+ "TFRemBertForMultipleChoice",
1715
+ "TFRemBertForQuestionAnswering",
1716
+ "TFRemBertForSequenceClassification",
1717
+ "TFRemBertForTokenClassification",
1718
+ "TFRemBertLayer",
1719
+ "TFRemBertModel",
1720
+ "TFRemBertPreTrainedModel",
1721
+ ]
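
As a quick orientation (an editorial sketch, not part of the committed file): the classes exported above follow the standard TF 2.0 `transformers` API. Assuming the `google/rembert` checkpoint referenced in the docstrings provides TensorFlow weights, the base model can be loaded and run roughly as follows.

```python
# Minimal usage sketch for the TF RemBERT classes above (editorial example,
# assumes the `google/rembert` checkpoint ships TF weights).
from transformers import AutoTokenizer, TFRemBertModel

tokenizer = AutoTokenizer.from_pretrained("google/rembert")
model = TFRemBertModel.from_pretrained("google/rembert")

# Tokenize a sentence and run a forward pass; last_hidden_state has shape
# (batch_size, sequence_length, hidden_size).
inputs = tokenizer("RemBERT decouples input and output embedding sizes.", return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
```

The task-specific heads (`TFRemBertForMaskedLM`, `TFRemBertForSequenceClassification`, and so on) are loaded the same way and accept the keyword inputs described in `REMBERT_INPUTS_DOCSTRING`.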
docs/transformers/build/lib/transformers/models/rembert/tokenization_rembert.py ADDED
@@ -0,0 +1,267 @@
1
+ # coding=utf-8
2
+ # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RemBERT."""
16
+
17
+ import os
18
+ from shutil import copyfile
19
+ from typing import List, Optional, Tuple
20
+
21
+ import sentencepiece as spm
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+ from ...utils.import_utils import requires
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
31
+
32
+
33
+ @requires(backends=("sentencepiece",))
34
+ class RemBertTokenizer(PreTrainedTokenizer):
35
+ """
36
+ Construct a RemBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
37
+
38
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
39
+ this superclass for more information regarding those methods.
40
+
41
+ Args:
42
+ vocab_file (`str`):
43
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
44
+ contains the vocabulary necessary to instantiate a tokenizer.
45
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
46
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
47
+
48
+ <Tip>
49
+
50
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
51
+ sequence. The token used is the `cls_token`.
52
+
53
+ </Tip>
54
+
55
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
56
+ The end of sequence token.
57
+
58
+ <Tip>
59
+
60
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
61
+ The token used is the `sep_token`.
62
+
63
+ </Tip>
64
+
65
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
66
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
67
+ token instead.
68
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
69
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
70
+ sequence classification or for a text and a question for question answering. It is also used as the last
71
+ token of a sequence built with special tokens.
72
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
73
+ The token used for padding, for example when batching sequences of different lengths.
74
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
75
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
76
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
77
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
78
+ The token used for masking values. This is the token used when training this model with masked language
79
+ modeling. This is the token which the model will try to predict.
80
+
81
+ Attributes:
82
+ sp_model (`SentencePieceProcessor`):
83
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
84
+ """
85
+
86
+ vocab_files_names = VOCAB_FILES_NAMES
87
+
88
+ def __init__(
89
+ self,
90
+ vocab_file,
91
+ do_lower_case=False,
92
+ remove_space=True,
93
+ keep_accents=True,
94
+ bos_token="[CLS]",
95
+ eos_token="[SEP]",
96
+ unk_token="[UNK]",
97
+ sep_token="[SEP]",
98
+ pad_token="[PAD]",
99
+ cls_token="[CLS]",
100
+ mask_token="[MASK]",
101
+ **kwargs,
102
+ ):
103
+ # The mask token behaves like a normal word, i.e. it includes the space before it
104
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
105
+
106
+ self.do_lower_case = do_lower_case
107
+ self.remove_space = remove_space
108
+ self.keep_accents = keep_accents
109
+ self.vocab_file = vocab_file
110
+
111
+ self.sp_model = spm.SentencePieceProcessor()
112
+ self.sp_model.Load(vocab_file)
113
+ super().__init__(
114
+ do_lower_case=do_lower_case,
115
+ remove_space=remove_space,
116
+ keep_accents=keep_accents,
117
+ bos_token=bos_token,
118
+ eos_token=eos_token,
119
+ unk_token=unk_token,
120
+ sep_token=sep_token,
121
+ pad_token=pad_token,
122
+ cls_token=cls_token,
123
+ mask_token=mask_token,
124
+ **kwargs,
125
+ )
126
+
127
+ @property
128
+ def vocab_size(self):
129
+ return len(self.sp_model)
130
+
131
+ def get_vocab(self):
132
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
133
+ vocab.update(self.added_tokens_encoder)
134
+ return vocab
135
+
136
+ def __getstate__(self):
137
+ state = self.__dict__.copy()
138
+ state["sp_model"] = None
139
+ return state
140
+
141
+ def __setstate__(self, d):
142
+ self.__dict__ = d
143
+ self.sp_model = spm.SentencePieceProcessor()
144
+ self.sp_model.Load(self.vocab_file)
145
+
146
+ def _tokenize(self, text, sample=False):
147
+ """Tokenize a string."""
148
+ pieces = self.sp_model.EncodeAsPieces(text)
149
+ return pieces
150
+
151
+ def _convert_token_to_id(self, token):
152
+ """Converts a token (str) in an id using the vocab."""
153
+ return self.sp_model.PieceToId(token)
154
+
155
+ def _convert_id_to_token(self, index):
156
+ """Converts an index (integer) in a token (str) using the vocab."""
157
+ return self.sp_model.IdToPiece(index)
158
+
159
+ def convert_tokens_to_string(self, tokens):
160
+ out_string = self.sp_model.decode_pieces(tokens)
161
+ return out_string
162
+
163
+ def build_inputs_with_special_tokens(
164
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
165
+ ) -> List[int]:
166
+ """
167
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
168
+ adding special tokens. A RemBERT sequence has the following format:
169
+
170
+ - single sequence: `[CLS] X [SEP]`
171
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
172
+
173
+ Args:
174
+ token_ids_0 (`List[int]`):
175
+ List of IDs to which the special tokens will be added.
176
+ token_ids_1 (`List[int]`, *optional*):
177
+ Optional second list of IDs for sequence pairs.
178
+
179
+ Returns:
180
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
181
+ """
182
+ sep = [self.sep_token_id]
183
+ cls = [self.cls_token_id]
184
+ if token_ids_1 is None:
185
+ return cls + token_ids_0 + sep
186
+ return cls + token_ids_0 + sep + token_ids_1 + sep
187
+
188
+ def get_special_tokens_mask(
189
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
190
+ ) -> List[int]:
191
+ """
192
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
193
+ special tokens using the tokenizer `prepare_for_model` method.
194
+
195
+ Args:
196
+ token_ids_0 (`List[int]`):
197
+ List of IDs.
198
+ token_ids_1 (`List[int]`, *optional*):
199
+ Optional second list of IDs for sequence pairs.
200
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
201
+ Whether or not the token list is already formatted with special tokens for the model.
202
+
203
+ Returns:
204
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
205
+ """
206
+
207
+ if already_has_special_tokens:
208
+ if token_ids_1 is not None:
209
+ raise ValueError(
210
+ "You should not supply a second sequence if the provided sequence of "
211
+ "ids is already formatted with special tokens for the model."
212
+ )
213
+ return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
214
+
215
+ if token_ids_1 is not None:
216
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
217
+ return [1] + ([0] * len(token_ids_0)) + [1]
218
+
219
+ def create_token_type_ids_from_sequences(
220
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
221
+ ) -> List[int]:
222
+ """
223
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT
224
+ sequence pair mask has the following format:
225
+
226
+ ```
227
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
228
+ | first sequence | second sequence |
229
+ ```
230
+
231
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
232
+
233
+ Args:
234
+ token_ids_0 (`List[int]`):
235
+ List of IDs.
236
+ token_ids_1 (`List[int]`, *optional*):
237
+ Optional second list of IDs for sequence pairs.
238
+
239
+ Returns:
240
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
241
+ """
242
+ sep = [self.sep_token_id]
243
+ cls = [self.cls_token_id]
244
+
245
+ if token_ids_1 is None:
246
+ return len(cls + token_ids_0 + sep) * [0]
247
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
248
+
249
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
250
+ if not os.path.isdir(save_directory):
251
+ logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
252
+ return
253
+ out_vocab_file = os.path.join(
254
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
255
+ )
256
+
257
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
258
+ copyfile(self.vocab_file, out_vocab_file)
259
+ elif not os.path.isfile(self.vocab_file):
260
+ with open(out_vocab_file, "wb") as fi:
261
+ content_spiece_model = self.sp_model.serialized_model_proto()
262
+ fi.write(content_spiece_model)
263
+
264
+ return (out_vocab_file,)
265
+
266
+
267
+ __all__ = ["RemBertTokenizer"]
docs/transformers/build/lib/transformers/models/rembert/tokenization_rembert_fast.py ADDED
@@ -0,0 +1,232 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RemBERT model."""
16
+
17
+ import os
18
+ from shutil import copyfile
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import AddedToken
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import is_sentencepiece_available, logging
24
+
25
+
26
+ if is_sentencepiece_available():
27
+ from .tokenization_rembert import RemBertTokenizer
28
+ else:
29
+ RemBertTokenizer = None
30
+
31
+ logger = logging.get_logger(__name__)
32
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
33
+
34
+
35
+ SPIECE_UNDERLINE = "▁"
36
+
37
+
38
+ class RemBertTokenizerFast(PreTrainedTokenizerFast):
39
+ """
40
+ Construct a "fast" RemBert tokenizer (backed by HuggingFace's *tokenizers* library). Based on
41
+ [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
42
+ tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
43
+ this superclass for more information regarding those methods.
44
+
45
+ Args:
46
+ vocab_file (`str`):
47
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
48
+ contains the vocabulary necessary to instantiate a tokenizer.
49
+ do_lower_case (`bool`, *optional*, defaults to `True`):
50
+ Whether or not to lowercase the input when tokenizing.
51
+ remove_space (`bool`, *optional*, defaults to `True`):
52
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
53
+ keep_accents (`bool`, *optional*, defaults to `False`):
54
+ Whether or not to keep accents when tokenizing.
55
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
56
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
57
+
58
+ <Tip>
59
+
60
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
61
+ sequence. The token used is the `cls_token`.
62
+
63
+ </Tip>
64
+
65
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
66
+ The end of sequence token. When building a sequence using special tokens, this is not the token
67
+ that is used for the end of sequence. The token used is the `sep_token`.
68
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
69
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
70
+ token instead.
71
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
72
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
73
+ sequence classification or for a text and a question for question answering. It is also used as the last
74
+ token of a sequence built with special tokens.
75
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
76
+ The token used for padding, for example when batching sequences of different lengths.
77
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
78
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
79
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
80
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
81
+ The token used for masking values. This is the token used when training this model with masked language
82
+ modeling. This is the token which the model will try to predict.
83
+ """
84
+
85
+ vocab_files_names = VOCAB_FILES_NAMES
86
+ slow_tokenizer_class = RemBertTokenizer
87
+
88
+ def __init__(
89
+ self,
90
+ vocab_file=None,
91
+ tokenizer_file=None,
92
+ do_lower_case=True,
93
+ remove_space=True,
94
+ keep_accents=False,
95
+ bos_token="[CLS]",
96
+ eos_token="[SEP]",
97
+ unk_token="<unk>",
98
+ sep_token="[SEP]",
99
+ pad_token="<pad>",
100
+ cls_token="[CLS]",
101
+ mask_token="[MASK]",
102
+ **kwargs,
103
+ ):
104
+ # Mask token behave like a normal word, i.e. include the space before it
105
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
106
+
107
+ super().__init__(
108
+ vocab_file,
109
+ tokenizer_file=tokenizer_file,
110
+ do_lower_case=do_lower_case,
111
+ remove_space=remove_space,
112
+ keep_accents=keep_accents,
113
+ bos_token=bos_token,
114
+ eos_token=eos_token,
115
+ unk_token=unk_token,
116
+ sep_token=sep_token,
117
+ pad_token=pad_token,
118
+ cls_token=cls_token,
119
+ mask_token=mask_token,
120
+ **kwargs,
121
+ )
122
+
123
+ self.do_lower_case = do_lower_case
124
+ self.remove_space = remove_space
125
+ self.keep_accents = keep_accents
126
+ self.vocab_file = vocab_file
127
+
128
+ @property
129
+ def can_save_slow_tokenizer(self) -> bool:
130
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
131
+
132
+ def build_inputs_with_special_tokens(
133
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
134
+ ) -> List[int]:
135
+ """
136
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
137
+ adding special tokens. A RemBERT sequence has the following format:
138
+
139
+ - single sequence: `[CLS] X [SEP]`
140
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
141
+
142
+ Args:
143
+ token_ids_0 (`List[int]`):
144
+ List of IDs to which the special tokens will be added.
145
+ token_ids_1 (`List[int]`, *optional*, defaults to `None`):
146
+ Optional second list of IDs for sequence pairs.
147
+
148
+ Returns:
149
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
150
+ """
151
+ sep = [self.sep_token_id]
152
+ cls = [self.cls_token_id]
153
+ if token_ids_1 is None:
154
+ return cls + token_ids_0 + sep
155
+ return cls + token_ids_0 + sep + token_ids_1 + sep
156
+
157
+ def get_special_tokens_mask(
158
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
159
+ ) -> List[int]:
160
+ """
161
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
162
+ special tokens using the tokenizer `prepare_for_model` method.
163
+
164
+ Args:
165
+ token_ids_0 (`List[int]`):
166
+ List of ids.
167
+ token_ids_1 (`List[int]`, *optional*, defaults to `None`):
168
+ Optional second list of IDs for sequence pairs.
169
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
170
+ Set to True if the token list is already formatted with special tokens for the model
171
+
172
+ Returns:
173
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
174
+ """
175
+
176
+ if already_has_special_tokens:
177
+ if token_ids_1 is not None:
178
+ raise ValueError(
179
+ "You should not supply a second sequence if the provided sequence of "
180
+ "ids is already formatted with special tokens for the model."
181
+ )
182
+ return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
183
+
184
+ if token_ids_1 is not None:
185
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
186
+ return [1] + ([0] * len(token_ids_0)) + [1]
187
+
188
+ def create_token_type_ids_from_sequences(
189
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
190
+ ) -> List[int]:
191
+ """
192
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A RemBERT
193
+ sequence pair mask has the following format:
194
+
195
+ ```
196
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
197
+ | first sequence | second sequence |
198
+ ```
199
+
200
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
201
+
202
+ Args:
203
+ token_ids_0 (`List[int]`):
204
+ List of ids.
205
+ token_ids_1 (`List[int]`, *optional*, defaults to `None`):
206
+ Optional second list of IDs for sequence pairs.
207
+
208
+ Returns:
209
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
210
+ """
211
+ sep = [self.sep_token_id]
212
+ cls = [self.cls_token_id]
213
+
214
+ if token_ids_1 is None:
215
+ return len(cls + token_ids_0 + sep) * [0]
216
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
217
+
218
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
219
+ if not os.path.isdir(save_directory):
220
+ logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
221
+ return
222
+ out_vocab_file = os.path.join(
223
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
224
+ )
225
+
226
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
227
+ copyfile(self.vocab_file, out_vocab_file)
228
+
229
+ return (out_vocab_file,)
230
+
231
+
232
+ __all__ = ["RemBertTokenizerFast"]
docs/transformers/build/lib/transformers/models/resnet/__init__.py ADDED
@@ -0,0 +1,29 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_resnet import *
22
+ from .modeling_flax_resnet import *
23
+ from .modeling_resnet import *
24
+ from .modeling_tf_resnet import *
25
+ else:
26
+ import sys
27
+
28
+ _file = globals()["__file__"]
29
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
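The `_LazyModule` indirection above defers the framework-specific imports until a symbol from this package is actually accessed. A small sketch of the observable behaviour (the exact import timing is an implementation detail of `_LazyModule` and is assumed here):

```python
# Sketch only: attribute access on the package resolves symbols through the lazy module.
from transformers.models import resnet

config_cls = resnet.ResNetConfig  # configuration_resnet is imported on first access
print(config_cls.model_type)  # "resnet"
```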
docs/transformers/build/lib/transformers/models/resnet/configuration_resnet.py ADDED
@@ -0,0 +1,136 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ResNet model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`ResNetModel`]. It is used to instantiate an
34
+ ResNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
35
+ with the defaults will yield a similar configuration to that of the ResNet
36
+ [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+ Args:
42
+ num_channels (`int`, *optional*, defaults to 3):
43
+ The number of input channels.
44
+ embedding_size (`int`, *optional*, defaults to 64):
45
+ Dimensionality (hidden size) for the embedding layer.
46
+ hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
47
+ Dimensionality (hidden size) at each stage.
48
+ depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
49
+ Depth (number of layers) for each stage.
50
+ layer_type (`str`, *optional*, defaults to `"bottleneck"`):
51
+ The layer to use, which can be either `"basic"` (used for smaller models, like resnet-18 or resnet-34) or
52
+ `"bottleneck"` (used for larger models like resnet-50 and above).
53
+ hidden_act (`str`, *optional*, defaults to `"relu"`):
54
+ The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
55
+ are supported.
56
+ downsample_in_first_stage (`bool`, *optional*, defaults to `False`):
57
+ If `True`, the first stage will downsample the inputs using a `stride` of 2.
58
+ downsample_in_bottleneck (`bool`, *optional*, defaults to `False`):
59
+ If `True`, the first conv 1x1 in ResNetBottleNeckLayer will downsample the inputs using a `stride` of 2.
60
+ out_features (`List[str]`, *optional*):
61
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
62
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
63
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
64
+ same order as defined in the `stage_names` attribute.
65
+ out_indices (`List[int]`, *optional*):
66
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
67
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
68
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
69
+ same order as defined in the `stage_names` attribute.
70
+
71
+ Example:
72
+ ```python
73
+ >>> from transformers import ResNetConfig, ResNetModel
74
+
75
+ >>> # Initializing a ResNet resnet-50 style configuration
76
+ >>> configuration = ResNetConfig()
77
+
78
+ >>> # Initializing a model (with random weights) from the resnet-50 style configuration
79
+ >>> model = ResNetModel(configuration)
80
+
81
+ >>> # Accessing the model configuration
82
+ >>> configuration = model.config
83
+ ```
84
+ """
85
+
86
+ model_type = "resnet"
87
+ layer_types = ["basic", "bottleneck"]
88
+
89
+ def __init__(
90
+ self,
91
+ num_channels=3,
92
+ embedding_size=64,
93
+ hidden_sizes=[256, 512, 1024, 2048],
94
+ depths=[3, 4, 6, 3],
95
+ layer_type="bottleneck",
96
+ hidden_act="relu",
97
+ downsample_in_first_stage=False,
98
+ downsample_in_bottleneck=False,
99
+ out_features=None,
100
+ out_indices=None,
101
+ **kwargs,
102
+ ):
103
+ super().__init__(**kwargs)
104
+ if layer_type not in self.layer_types:
105
+ raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
106
+ self.num_channels = num_channels
107
+ self.embedding_size = embedding_size
108
+ self.hidden_sizes = hidden_sizes
109
+ self.depths = depths
110
+ self.layer_type = layer_type
111
+ self.hidden_act = hidden_act
112
+ self.downsample_in_first_stage = downsample_in_first_stage
113
+ self.downsample_in_bottleneck = downsample_in_bottleneck
114
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
115
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
116
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
117
+ )
118
+
119
+
120
+ class ResNetOnnxConfig(OnnxConfig):
121
+ torch_onnx_minimum_version = version.parse("1.11")
122
+
123
+ @property
124
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
125
+ return OrderedDict(
126
+ [
127
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
128
+ ]
129
+ )
130
+
131
+ @property
132
+ def atol_for_validation(self) -> float:
133
+ return 1e-3
134
+
135
+
136
+ __all__ = ["ResNetConfig", "ResNetOnnxConfig"]
docs/transformers/build/lib/transformers/models/resnet/convert_resnet_to_pytorch.py ADDED
@@ -0,0 +1,199 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ResNet checkpoints from timm."""
16
+
17
+ import argparse
18
+ import json
19
+ from dataclasses import dataclass, field
20
+ from functools import partial
21
+ from pathlib import Path
22
+ from typing import List, Optional
23
+
24
+ import timm
25
+ import torch
26
+ import torch.nn as nn
27
+ from huggingface_hub import hf_hub_download
28
+ from torch import Tensor
29
+
30
+ from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
31
+ from transformers.utils import logging
32
+
33
+
34
+ logging.set_verbosity_info()
35
+ logger = logging.get_logger()
36
+
37
+
38
+ @dataclass
39
+ class Tracker:
40
+ module: nn.Module
41
+ traced: List[nn.Module] = field(default_factory=list)
42
+ handles: list = field(default_factory=list)
43
+
44
+ def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
45
+ has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
46
+ if has_not_submodules:
47
+ self.traced.append(m)
48
+
49
+ def __call__(self, x: Tensor):
50
+ for m in self.module.modules():
51
+ self.handles.append(m.register_forward_hook(self._forward_hook))
52
+ self.module(x)
53
+ [x.remove() for x in self.handles]
54
+ return self
55
+
56
+ @property
57
+ def parametrized(self):
58
+ # check the len of the state_dict keys to see if we have learnable params
59
+ return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
60
+
61
+
62
+ @dataclass
63
+ class ModuleTransfer:
64
+ src: nn.Module
65
+ dest: nn.Module
66
+ verbose: int = 0
67
+ src_skip: List = field(default_factory=list)
68
+ dest_skip: List = field(default_factory=list)
69
+
70
+ def __call__(self, x: Tensor):
71
+ """
72
+ Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
73
+ hood we track all the operations in both modules.
74
+ """
75
+ dest_traced = Tracker(self.dest)(x).parametrized
76
+ src_traced = Tracker(self.src)(x).parametrized
77
+
78
+ src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
79
+ dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
80
+
81
+ if len(dest_traced) != len(src_traced):
82
+ raise Exception(
83
+ f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
84
+ f" destination module has {len(dest_traced)}."
85
+ )
86
+
87
+ for dest_m, src_m in zip(dest_traced, src_traced):
88
+ dest_m.load_state_dict(src_m.state_dict())
89
+ if self.verbose == 1:
90
+ print(f"Transfered from={src_m} to={dest_m}")
91
+
92
+
93
+ def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
94
+ print(f"Converting {name}...")
95
+ with torch.no_grad():
96
+ from_model = timm.create_model(name, pretrained=True).eval()
97
+ our_model = ResNetForImageClassification(config).eval()
98
+ module_transfer = ModuleTransfer(src=from_model, dest=our_model)
99
+ x = torch.randn((1, 3, 224, 224))
100
+ module_transfer(x)
101
+
102
+ assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
103
+
104
+ checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
105
+ print(checkpoint_name)
106
+
107
+ if push_to_hub:
108
+ our_model.push_to_hub(
109
+ repo_path_or_name=save_directory / checkpoint_name,
110
+ commit_message="Add model",
111
+ use_temp_dir=True,
112
+ )
113
+
114
+ # we can use the convnext one
115
+ image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
116
+ image_processor.push_to_hub(
117
+ repo_path_or_name=save_directory / checkpoint_name,
118
+ commit_message="Add image processor",
119
+ use_temp_dir=True,
120
+ )
121
+
122
+ print(f"Pushed {checkpoint_name}")
123
+
124
+
125
+ def convert_weights_and_push(save_directory: Path, model_name: Optional[str] = None, push_to_hub: bool = True):
126
+ filename = "imagenet-1k-id2label.json"
127
+ num_labels = 1000
128
+ expected_shape = (1, num_labels)
129
+
130
+ repo_id = "huggingface/label-files"
131
+ num_labels = num_labels
132
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
133
+ id2label = {int(k): v for k, v in id2label.items()}
134
+
135
+ id2label = id2label
136
+ label2id = {v: k for k, v in id2label.items()}
137
+
138
+ ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
139
+
140
+ names_to_config = {
141
+ "resnet18": ImageNetPreTrainedConfig(
142
+ depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
143
+ ),
144
+ "resnet26": ImageNetPreTrainedConfig(
145
+ depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
146
+ ),
147
+ "resnet34": ImageNetPreTrainedConfig(
148
+ depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
149
+ ),
150
+ "resnet50": ImageNetPreTrainedConfig(
151
+ depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
152
+ ),
153
+ "resnet101": ImageNetPreTrainedConfig(
154
+ depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
155
+ ),
156
+ "resnet152": ImageNetPreTrainedConfig(
157
+ depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
158
+ ),
159
+ }
160
+
161
+ if model_name:
162
+ convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
163
+ else:
164
+ for model_name, config in names_to_config.items():
165
+ convert_weight_and_push(model_name, config, save_directory, push_to_hub)
166
+ return config, expected_shape
167
+
168
+
169
+ if __name__ == "__main__":
170
+ parser = argparse.ArgumentParser()
171
+ # Required parameters
172
+ parser.add_argument(
173
+ "--model_name",
174
+ default=None,
175
+ type=str,
176
+ help=(
177
+ "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
178
+ " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
179
+ ),
180
+ )
181
+ parser.add_argument(
182
+ "--pytorch_dump_folder_path",
183
+ default=None,
184
+ type=Path,
185
+ required=True,
186
+ help="Path to the output PyTorch model directory.",
187
+ )
188
+ parser.add_argument(
189
+ "--push_to_hub",
190
+ default=True,
191
+ type=bool,
192
+ required=False,
193
+ help="If True, push model and image processor to the hub.",
194
+ )
195
+
196
+ args = parser.parse_args()
197
+ pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
198
+ pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
199
+ convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
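The `Tracker`/`ModuleTransfer` pair above copies weights by tracing the parametrized leaf modules hit during a forward pass and zipping the two traces together. The following self-contained toy mirrors that idea on two architecturally identical modules (it is a sketch of the mechanism, not part of the conversion script):

```python
# Sketch only: a toy re-implementation of the trace-and-copy idea used above.
import torch
import torch.nn as nn


def trace_leaves(module: nn.Module, x: torch.Tensor):
    """Collect parametrized leaf modules in the order they run during a forward pass."""
    traced, handles = [], []

    def hook(m, inputs, outputs):
        if len(list(m.children())) == 0 and len(m.state_dict()) > 0:
            traced.append(m)

    for m in module.modules():
        handles.append(m.register_forward_hook(hook))
    module(x)
    for handle in handles:
        handle.remove()
    return traced


src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8, 4, 1))
dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8, 4, 1))

x = torch.randn(1, 3, 16, 16)
for dest_m, src_m in zip(trace_leaves(dest, x), trace_leaves(src, x)):
    dest_m.load_state_dict(src_m.state_dict())

with torch.no_grad():
    assert torch.allclose(src(x), dest(x))
```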
docs/transformers/build/lib/transformers/models/resnet/modeling_flax_resnet.py ADDED
@@ -0,0 +1,704 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from functools import partial
17
+ from typing import Optional, Tuple
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
23
+ from flax.traverse_util import flatten_dict, unflatten_dict
24
+
25
+ from ...modeling_flax_outputs import (
26
+ FlaxBaseModelOutputWithNoAttention,
27
+ FlaxBaseModelOutputWithPoolingAndNoAttention,
28
+ FlaxImageClassifierOutputWithNoAttention,
29
+ )
30
+ from ...modeling_flax_utils import (
31
+ ACT2FN,
32
+ FlaxPreTrainedModel,
33
+ append_replace_return_docstrings,
34
+ overwrite_call_docstring,
35
+ )
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward
37
+ from .configuration_resnet import ResNetConfig
38
+
39
+
40
+ RESNET_START_DOCSTRING = r"""
41
+
42
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
43
+ library implements for all its model (such as downloading, saving and converting weights from PyTorch models)
44
+
45
+ This model is also a
46
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
47
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
48
+ behavior.
49
+
50
+ Finally, this model supports inherent JAX features such as:
51
+
52
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
53
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
54
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
55
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
56
+
57
+ Parameters:
58
+ config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
59
+ Initializing with a config file does not load the weights associated with the model, only the
60
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
61
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
62
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
63
+ `jax.numpy.bfloat16` (on TPUs).
64
+
65
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
66
+ specified all the computation will be performed with the given `dtype`.
67
+
68
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
69
+ parameters.**
70
+
71
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
72
+ [`~FlaxPreTrainedModel.to_bf16`].
73
+ """
74
+
75
+
76
+ RESNET_INPUTS_DOCSTRING = r"""
77
+ Args:
78
+ pixel_values (`jax.numpy.float32` of shape `(batch_size, num_channels, height, width)`):
79
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
80
+ [`AutoImageProcessor.__call__`] for details.
81
+ output_hidden_states (`bool`, *optional*):
82
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
83
+ more detail.
84
+ return_dict (`bool`, *optional*):
85
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
86
+ """
87
+
88
+
89
+ class Identity(nn.Module):
90
+ """Identity function."""
91
+
92
+ @nn.compact
93
+ def __call__(self, x, **kwargs):
94
+ return x
95
+
96
+
97
+ class FlaxResNetConvLayer(nn.Module):
98
+ out_channels: int
99
+ kernel_size: int = 3
100
+ stride: int = 1
101
+ activation: Optional[str] = "relu"
102
+ dtype: jnp.dtype = jnp.float32
103
+
104
+ def setup(self):
105
+ self.convolution = nn.Conv(
106
+ self.out_channels,
107
+ kernel_size=(self.kernel_size, self.kernel_size),
108
+ strides=self.stride,
109
+ padding=self.kernel_size // 2,
110
+ dtype=self.dtype,
111
+ use_bias=False,
112
+ kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="normal", dtype=self.dtype),
113
+ )
114
+ self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype)
115
+ self.activation_func = ACT2FN[self.activation] if self.activation is not None else Identity()
116
+
117
+ def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
118
+ hidden_state = self.convolution(x)
119
+ hidden_state = self.normalization(hidden_state, use_running_average=deterministic)
120
+ hidden_state = self.activation_func(hidden_state)
121
+ return hidden_state
122
+
123
+
124
+ class FlaxResNetEmbeddings(nn.Module):
125
+ """
126
+ ResNet Embeddings (stem) composed of a single aggressive convolution.
127
+ """
128
+
129
+ config: ResNetConfig
130
+ dtype: jnp.dtype = jnp.float32
131
+
132
+ def setup(self):
133
+ self.embedder = FlaxResNetConvLayer(
134
+ self.config.embedding_size,
135
+ kernel_size=7,
136
+ stride=2,
137
+ activation=self.config.hidden_act,
138
+ dtype=self.dtype,
139
+ )
140
+
141
+ self.max_pool = partial(nn.max_pool, window_shape=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)))
142
+
143
+ def __call__(self, pixel_values: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
144
+ num_channels = pixel_values.shape[-1]
145
+ if num_channels != self.config.num_channels:
146
+ raise ValueError(
147
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
148
+ )
149
+ embedding = self.embedder(pixel_values, deterministic=deterministic)
150
+ embedding = self.max_pool(embedding)
151
+ return embedding
152
+
153
+
154
+ class FlaxResNetShortCut(nn.Module):
155
+ """
156
+ ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
157
+ downsample the input using `stride=2`.
158
+ """
159
+
160
+ out_channels: int
161
+ stride: int = 2
162
+ dtype: jnp.dtype = jnp.float32
163
+
164
+ def setup(self):
165
+ self.convolution = nn.Conv(
166
+ self.out_channels,
167
+ kernel_size=(1, 1),
168
+ strides=self.stride,
169
+ use_bias=False,
170
+ kernel_init=nn.initializers.variance_scaling(2.0, mode="fan_out", distribution="truncated_normal"),
171
+ dtype=self.dtype,
172
+ )
173
+ self.normalization = nn.BatchNorm(momentum=0.9, epsilon=1e-05, dtype=self.dtype)
174
+
175
+ def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
176
+ hidden_state = self.convolution(x)
177
+ hidden_state = self.normalization(hidden_state, use_running_average=deterministic)
178
+ return hidden_state
179
+
180
+
181
+ class FlaxResNetBasicLayerCollection(nn.Module):
182
+ out_channels: int
183
+ stride: int = 1
184
+ dtype: jnp.dtype = jnp.float32
185
+
186
+ def setup(self):
187
+ self.layer = [
188
+ FlaxResNetConvLayer(self.out_channels, stride=self.stride, dtype=self.dtype),
189
+ FlaxResNetConvLayer(self.out_channels, activation=None, dtype=self.dtype),
190
+ ]
191
+
192
+ def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
193
+ for layer in self.layer:
194
+ hidden_state = layer(hidden_state, deterministic=deterministic)
195
+ return hidden_state
196
+
197
+
198
+ class FlaxResNetBasicLayer(nn.Module):
199
+ """
200
+ A classic ResNet's residual layer composed of two `3x3` convolutions.
201
+ """
202
+
203
+ in_channels: int
204
+ out_channels: int
205
+ stride: int = 1
206
+ activation: Optional[str] = "relu"
207
+ dtype: jnp.dtype = jnp.float32
208
+
209
+ def setup(self):
210
+ should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1
211
+ self.shortcut = (
212
+ FlaxResNetShortCut(self.out_channels, stride=self.stride, dtype=self.dtype)
213
+ if should_apply_shortcut
214
+ else None
215
+ )
216
+ self.layer = FlaxResNetBasicLayerCollection(
217
+ out_channels=self.out_channels,
218
+ stride=self.stride,
219
+ dtype=self.dtype,
220
+ )
221
+ self.activation_func = ACT2FN[self.activation]
222
+
223
+ def __call__(self, hidden_state, deterministic: bool = True):
224
+ residual = hidden_state
225
+ hidden_state = self.layer(hidden_state, deterministic=deterministic)
226
+
227
+ if self.shortcut is not None:
228
+ residual = self.shortcut(residual, deterministic=deterministic)
229
+ hidden_state += residual
230
+
231
+ hidden_state = self.activation_func(hidden_state)
232
+ return hidden_state
233
+
234
+
235
+ class FlaxResNetBottleNeckLayerCollection(nn.Module):
236
+ out_channels: int
237
+ stride: int = 1
238
+ activation: Optional[str] = "relu"
239
+ reduction: int = 4
240
+ dtype: jnp.dtype = jnp.float32
241
+
242
+ def setup(self):
243
+ reduces_channels = self.out_channels // self.reduction
244
+
245
+ self.layer = [
246
+ FlaxResNetConvLayer(reduces_channels, kernel_size=1, dtype=self.dtype, name="0"),
247
+ FlaxResNetConvLayer(reduces_channels, stride=self.stride, dtype=self.dtype, name="1"),
248
+ FlaxResNetConvLayer(self.out_channels, kernel_size=1, activation=None, dtype=self.dtype, name="2"),
249
+ ]
250
+
251
+ def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
252
+ for layer in self.layer:
253
+ hidden_state = layer(hidden_state, deterministic=deterministic)
254
+ return hidden_state
255
+
256
+
257
+ class FlaxResNetBottleNeckLayer(nn.Module):
258
+ """
259
+ A classic ResNet's bottleneck layer composed of a `1x1`, a `3x3` and a `1x1` convolution. The first `1x1` convolution reduces the
260
+ input by a factor of `reduction` in order to make the second `3x3` convolution faster. The last `1x1` convolution
261
+ remaps the reduced features to `out_channels`.
262
+ """
263
+
264
+ in_channels: int
265
+ out_channels: int
266
+ stride: int = 1
267
+ activation: Optional[str] = "relu"
268
+ reduction: int = 4
269
+ dtype: jnp.dtype = jnp.float32
270
+
271
+ def setup(self):
272
+ should_apply_shortcut = self.in_channels != self.out_channels or self.stride != 1
273
+ self.shortcut = (
274
+ FlaxResNetShortCut(self.out_channels, stride=self.stride, dtype=self.dtype)
275
+ if should_apply_shortcut
276
+ else None
277
+ )
278
+
279
+ self.layer = FlaxResNetBottleNeckLayerCollection(
280
+ self.out_channels,
281
+ stride=self.stride,
282
+ activation=self.activation,
283
+ reduction=self.reduction,
284
+ dtype=self.dtype,
285
+ )
286
+
287
+ self.activation_func = ACT2FN[self.activation]
288
+
289
+ def __call__(self, hidden_state: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
290
+ residual = hidden_state
291
+
292
+ if self.shortcut is not None:
293
+ residual = self.shortcut(residual, deterministic=deterministic)
294
+ hidden_state = self.layer(hidden_state, deterministic)
295
+ hidden_state += residual
296
+ hidden_state = self.activation_func(hidden_state)
297
+ return hidden_state
298
+
299
+
300
+ class FlaxResNetStageLayersCollection(nn.Module):
301
+ """
302
+ A ResNet stage composed of stacked layers.
303
+ """
304
+
305
+ config: ResNetConfig
306
+ in_channels: int
307
+ out_channels: int
308
+ stride: int = 2
309
+ depth: int = 2
310
+ dtype: jnp.dtype = jnp.float32
311
+
312
+ def setup(self):
313
+ layer = FlaxResNetBottleNeckLayer if self.config.layer_type == "bottleneck" else FlaxResNetBasicLayer
314
+
315
+ layers = [
316
+ # downsampling is done in the first layer with stride of 2
317
+ layer(
318
+ self.in_channels,
319
+ self.out_channels,
320
+ stride=self.stride,
321
+ activation=self.config.hidden_act,
322
+ dtype=self.dtype,
323
+ name="0",
324
+ ),
325
+ ]
326
+
327
+ for i in range(self.depth - 1):
328
+ layers.append(
329
+ layer(
330
+ self.out_channels,
331
+ self.out_channels,
332
+ activation=self.config.hidden_act,
333
+ dtype=self.dtype,
334
+ name=str(i + 1),
335
+ )
336
+ )
337
+
338
+ self.layers = layers
339
+
340
+ def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
341
+ hidden_state = x
342
+ for layer in self.layers:
343
+ hidden_state = layer(hidden_state, deterministic=deterministic)
344
+ return hidden_state
345
+
346
+
347
+ class FlaxResNetStage(nn.Module):
348
+ """
349
+ A ResNet stage composed of stacked layers.
350
+ """
351
+
352
+ config: ResNetConfig
353
+ in_channels: int
354
+ out_channels: int
355
+ stride: int = 2
356
+ depth: int = 2
357
+ dtype: jnp.dtype = jnp.float32
358
+
359
+ def setup(self):
360
+ self.layers = FlaxResNetStageLayersCollection(
361
+ self.config,
362
+ in_channels=self.in_channels,
363
+ out_channels=self.out_channels,
364
+ stride=self.stride,
365
+ depth=self.depth,
366
+ dtype=self.dtype,
367
+ )
368
+
369
+ def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
370
+ return self.layers(x, deterministic=deterministic)
371
+
372
+
373
+ class FlaxResNetStageCollection(nn.Module):
374
+ config: ResNetConfig
375
+ dtype: jnp.dtype = jnp.float32
376
+
377
+ def setup(self):
378
+ in_out_channels = zip(self.config.hidden_sizes, self.config.hidden_sizes[1:])
379
+ stages = [
380
+ FlaxResNetStage(
381
+ self.config,
382
+ self.config.embedding_size,
383
+ self.config.hidden_sizes[0],
384
+ stride=2 if self.config.downsample_in_first_stage else 1,
385
+ depth=self.config.depths[0],
386
+ dtype=self.dtype,
387
+ name="0",
388
+ )
389
+ ]
390
+
391
+ for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, self.config.depths[1:])):
392
+ stages.append(
393
+ FlaxResNetStage(self.config, in_channels, out_channels, depth=depth, dtype=self.dtype, name=str(i + 1))
394
+ )
395
+
396
+ self.stages = stages
397
+
398
+ def __call__(
399
+ self,
400
+ hidden_state: jnp.ndarray,
401
+ output_hidden_states: bool = False,
402
+ deterministic: bool = True,
403
+ ) -> FlaxBaseModelOutputWithNoAttention:
404
+ hidden_states = () if output_hidden_states else None
405
+
406
+ for stage_module in self.stages:
407
+ if output_hidden_states:
408
+ hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),)
409
+
410
+ hidden_state = stage_module(hidden_state, deterministic=deterministic)
411
+
412
+ return hidden_state, hidden_states
413
+
414
+
415
+ class FlaxResNetEncoder(nn.Module):
416
+ config: ResNetConfig
417
+ dtype: jnp.dtype = jnp.float32
418
+
419
+ def setup(self):
420
+ self.stages = FlaxResNetStageCollection(self.config, dtype=self.dtype)
421
+
422
+ def __call__(
423
+ self,
424
+ hidden_state: jnp.ndarray,
425
+ output_hidden_states: bool = False,
426
+ return_dict: bool = True,
427
+ deterministic: bool = True,
428
+ ) -> FlaxBaseModelOutputWithNoAttention:
429
+ hidden_state, hidden_states = self.stages(
430
+ hidden_state, output_hidden_states=output_hidden_states, deterministic=deterministic
431
+ )
432
+
433
+ if output_hidden_states:
434
+ hidden_states = hidden_states + (hidden_state.transpose(0, 3, 1, 2),)
435
+
436
+ if not return_dict:
437
+ return tuple(v for v in [hidden_state, hidden_states] if v is not None)
438
+
439
+ return FlaxBaseModelOutputWithNoAttention(
440
+ last_hidden_state=hidden_state,
441
+ hidden_states=hidden_states,
442
+ )
443
+
444
+
445
+ class FlaxResNetPreTrainedModel(FlaxPreTrainedModel):
446
+ """
447
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
448
+ models.
449
+ """
450
+
451
+ config_class = ResNetConfig
452
+ base_model_prefix = "resnet"
453
+ main_input_name = "pixel_values"
454
+ module_class: nn.Module = None
455
+
456
+ def __init__(
457
+ self,
458
+ config: ResNetConfig,
459
+ input_shape=(1, 224, 224, 3),
460
+ seed: int = 0,
461
+ dtype: jnp.dtype = jnp.float32,
462
+ _do_init: bool = True,
463
+ **kwargs,
464
+ ):
465
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
466
+ if input_shape is None:
467
+ input_shape = (1, config.image_size, config.image_size, config.num_channels)
468
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
469
+
470
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
471
+ # init input tensors
472
+ pixel_values = jnp.zeros(input_shape, dtype=self.dtype)
473
+
474
+ rngs = {"params": rng}
475
+
476
+ random_params = self.module.init(rngs, pixel_values, return_dict=False)
477
+
478
+ if params is not None:
479
+ random_params = flatten_dict(unfreeze(random_params))
480
+ params = flatten_dict(unfreeze(params))
481
+ for missing_key in self._missing_keys:
482
+ params[missing_key] = random_params[missing_key]
483
+ self._missing_keys = set()
484
+ return freeze(unflatten_dict(params))
485
+ else:
486
+ return random_params
487
+
488
+ @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
489
+ def __call__(
490
+ self,
491
+ pixel_values,
492
+ params: dict = None,
493
+ train: bool = False,
494
+ output_hidden_states: Optional[bool] = None,
495
+ return_dict: Optional[bool] = None,
496
+ ):
497
+ output_hidden_states = (
498
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
499
+ )
500
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
501
+
502
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
503
+
504
+ # Handle any PRNG if needed
505
+ rngs = {}
506
+
507
+ return self.module.apply(
508
+ {
509
+ "params": params["params"] if params is not None else self.params["params"],
510
+ "batch_stats": params["batch_stats"] if params is not None else self.params["batch_stats"],
511
+ },
512
+ jnp.array(pixel_values, dtype=jnp.float32),
513
+ not train,
514
+ output_hidden_states,
515
+ return_dict,
516
+ rngs=rngs,
517
+ mutable=["batch_stats"] if train else False, # Returing tuple with batch_stats only when train is True
518
+ )
519
+
520
+
521
+ class FlaxResNetModule(nn.Module):
522
+ config: ResNetConfig
523
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
524
+
525
+ def setup(self):
526
+ self.embedder = FlaxResNetEmbeddings(self.config, dtype=self.dtype)
527
+ self.encoder = FlaxResNetEncoder(self.config, dtype=self.dtype)
528
+
529
+ # Adaptive average pooling used in resnet
530
+ self.pooler = partial(
531
+ nn.avg_pool,
532
+ padding=((0, 0), (0, 0)),
533
+ )
534
+
535
+ def __call__(
536
+ self,
537
+ pixel_values,
538
+ deterministic: bool = True,
539
+ output_hidden_states: bool = False,
540
+ return_dict: bool = True,
541
+ ) -> FlaxBaseModelOutputWithPoolingAndNoAttention:
542
+ output_hidden_states = (
543
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
544
+ )
545
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
546
+
547
+ embedding_output = self.embedder(pixel_values, deterministic=deterministic)
548
+
549
+ encoder_outputs = self.encoder(
550
+ embedding_output,
551
+ output_hidden_states=output_hidden_states,
552
+ return_dict=return_dict,
553
+ deterministic=deterministic,
554
+ )
555
+
556
+ last_hidden_state = encoder_outputs[0]
557
+
558
+ pooled_output = self.pooler(
559
+ last_hidden_state,
560
+ window_shape=(last_hidden_state.shape[1], last_hidden_state.shape[2]),
561
+ strides=(last_hidden_state.shape[1], last_hidden_state.shape[2]),
562
+ ).transpose(0, 3, 1, 2)
563
+
564
+ last_hidden_state = last_hidden_state.transpose(0, 3, 1, 2)
565
+
566
+ if not return_dict:
567
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
568
+
569
+ return FlaxBaseModelOutputWithPoolingAndNoAttention(
570
+ last_hidden_state=last_hidden_state,
571
+ pooler_output=pooled_output,
572
+ hidden_states=encoder_outputs.hidden_states,
573
+ )
574
+
575
+
576
+ @add_start_docstrings(
577
+ "The bare ResNet model outputting raw features without any specific head on top.",
578
+ RESNET_START_DOCSTRING,
579
+ )
580
+ class FlaxResNetModel(FlaxResNetPreTrainedModel):
581
+ module_class = FlaxResNetModule
582
+
583
+
584
+ FLAX_VISION_MODEL_DOCSTRING = """
585
+ Returns:
586
+
587
+ Examples:
588
+
589
+ ```python
590
+ >>> from transformers import AutoImageProcessor, FlaxResNetModel
591
+ >>> from PIL import Image
592
+ >>> import requests
593
+
594
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
595
+ >>> image = Image.open(requests.get(url, stream=True).raw)
596
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
597
+ >>> model = FlaxResNetModel.from_pretrained("microsoft/resnet-50")
598
+ >>> inputs = image_processor(images=image, return_tensors="np")
599
+ >>> outputs = model(**inputs)
600
+ >>> last_hidden_states = outputs.last_hidden_state
601
+ ```
602
+ """
603
+
604
+ overwrite_call_docstring(FlaxResNetModel, FLAX_VISION_MODEL_DOCSTRING)
605
+ append_replace_return_docstrings(
606
+ FlaxResNetModel, output_type=FlaxBaseModelOutputWithPoolingAndNoAttention, config_class=ResNetConfig
607
+ )
608
+
609
+
610
+ class FlaxResNetClassifierCollection(nn.Module):
611
+ config: ResNetConfig
612
+ dtype: jnp.dtype = jnp.float32
613
+
614
+ def setup(self):
615
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype, name="1")
616
+
617
+ def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
618
+ return self.classifier(x)
619
+
620
+
621
+ class FlaxResNetForImageClassificationModule(nn.Module):
622
+ config: ResNetConfig
623
+ dtype: jnp.dtype = jnp.float32
624
+
625
+ def setup(self):
626
+ self.resnet = FlaxResNetModule(config=self.config, dtype=self.dtype)
627
+
628
+ if self.config.num_labels > 0:
629
+ self.classifier = FlaxResNetClassifierCollection(self.config, dtype=self.dtype)
630
+ else:
631
+ self.classifier = Identity()
632
+
633
+ def __call__(
634
+ self,
635
+ pixel_values=None,
636
+ deterministic: bool = True,
637
+ output_hidden_states=None,
638
+ return_dict=None,
639
+ ):
640
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
641
+
642
+ outputs = self.resnet(
643
+ pixel_values,
644
+ deterministic=deterministic,
645
+ output_hidden_states=output_hidden_states,
646
+ return_dict=return_dict,
647
+ )
648
+
649
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
650
+
651
+ logits = self.classifier(pooled_output[:, :, 0, 0])
652
+
653
+ if not return_dict:
654
+ output = (logits,) + outputs[2:]
655
+ return output
656
+
657
+ return FlaxImageClassifierOutputWithNoAttention(logits=logits, hidden_states=outputs.hidden_states)
658
+
659
+
660
+ @add_start_docstrings(
661
+ """
662
+ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
663
+ ImageNet.
664
+ """,
665
+ RESNET_START_DOCSTRING,
666
+ )
667
+ class FlaxResNetForImageClassification(FlaxResNetPreTrainedModel):
668
+ module_class = FlaxResNetForImageClassificationModule
669
+
670
+
671
+ FLAX_VISION_CLASSIF_DOCSTRING = """
672
+ Returns:
673
+
674
+ Example:
675
+
676
+ ```python
677
+ >>> from transformers import AutoImageProcessor, FlaxResNetForImageClassification
678
+ >>> from PIL import Image
679
+ >>> import jax
680
+ >>> import requests
681
+
682
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
683
+ >>> image = Image.open(requests.get(url, stream=True).raw)
684
+
685
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
686
+ >>> model = FlaxResNetForImageClassification.from_pretrained("microsoft/resnet-50")
687
+
688
+ >>> inputs = image_processor(images=image, return_tensors="np")
689
+ >>> outputs = model(**inputs)
690
+ >>> logits = outputs.logits
691
+
692
+ >>> # model predicts one of the 1000 ImageNet classes
693
+ >>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1)
694
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()])
695
+ ```
696
+ """
697
+
698
+ overwrite_call_docstring(FlaxResNetForImageClassification, FLAX_VISION_CLASSIF_DOCSTRING)
699
+ append_replace_return_docstrings(
700
+ FlaxResNetForImageClassification, output_type=FlaxImageClassifierOutputWithNoAttention, config_class=ResNetConfig
701
+ )
702
+
703
+
704
+ __all__ = ["FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxResNetPreTrainedModel"]
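As a quick, self-contained sketch building on the classification example above (assuming the public `microsoft/resnet-50` checkpoint and network access for the sample image), the logits can be turned into class probabilities with `jax.nn.softmax`:

```python
import jax
import requests
from PIL import Image
from transformers import AutoImageProcessor, FlaxResNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = FlaxResNetForImageClassification.from_pretrained("microsoft/resnet-50")

inputs = image_processor(images=image, return_tensors="np")
logits = model(**inputs).logits

# softmax over the ImageNet classes turns the raw logits into probabilities
probs = jax.nn.softmax(logits, axis=-1)
predicted_class_idx = int(probs.argmax(axis=-1)[0])
print(model.config.id2label[predicted_class_idx], float(probs[0, predicted_class_idx]))
```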
docs/transformers/build/lib/transformers/models/resnet/modeling_resnet.py ADDED
@@ -0,0 +1,520 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ResNet model."""
16
+
17
+ import math
18
+ from typing import Optional
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import Tensor, nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BackboneOutput,
28
+ BaseModelOutputWithNoAttention,
29
+ BaseModelOutputWithPoolingAndNoAttention,
30
+ ImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import (
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ replace_return_docstrings,
39
+ )
40
+ from ...utils.backbone_utils import BackboneMixin
41
+ from .configuration_resnet import ResNetConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ # General docstring
47
+ _CONFIG_FOR_DOC = "ResNetConfig"
48
+
49
+ # Base docstring
50
+ _CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
51
+ _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
52
+
53
+ # Image classification docstring
54
+ _IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
55
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
56
+
57
+
58
+ class ResNetConvLayer(nn.Module):
59
+ def __init__(
60
+ self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
61
+ ):
62
+ super().__init__()
63
+ self.convolution = nn.Conv2d(
64
+ in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
65
+ )
66
+ self.normalization = nn.BatchNorm2d(out_channels)
67
+ self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
68
+
69
+ def forward(self, input: Tensor) -> Tensor:
70
+ hidden_state = self.convolution(input)
71
+ hidden_state = self.normalization(hidden_state)
72
+ hidden_state = self.activation(hidden_state)
73
+ return hidden_state
74
+
75
+
76
+ class ResNetEmbeddings(nn.Module):
77
+ """
78
+ ResNet Embeddings (stem) composed of a single aggressive convolution.
79
+ """
80
+
81
+ def __init__(self, config: ResNetConfig):
82
+ super().__init__()
83
+ self.embedder = ResNetConvLayer(
84
+ config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
85
+ )
86
+ self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
87
+ self.num_channels = config.num_channels
88
+
89
+ def forward(self, pixel_values: Tensor) -> Tensor:
90
+ num_channels = pixel_values.shape[1]
91
+ if num_channels != self.num_channels:
92
+ raise ValueError(
93
+ "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
94
+ )
95
+ embedding = self.embedder(pixel_values)
96
+ embedding = self.pooler(embedding)
97
+ return embedding
98
+
99
+
100
+ class ResNetShortCut(nn.Module):
101
+ """
102
+ ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
103
+ downsample the input using `stride=2`.
104
+ """
105
+
106
+ def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
107
+ super().__init__()
108
+ self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
109
+ self.normalization = nn.BatchNorm2d(out_channels)
110
+
111
+ def forward(self, input: Tensor) -> Tensor:
112
+ hidden_state = self.convolution(input)
113
+ hidden_state = self.normalization(hidden_state)
114
+ return hidden_state
115
+
116
+
117
+ class ResNetBasicLayer(nn.Module):
118
+ """
119
+ A classic ResNet residual layer, composed of two `3x3` convolutions.
120
+ """
121
+
122
+ def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
123
+ super().__init__()
124
+ should_apply_shortcut = in_channels != out_channels or stride != 1
125
+ self.shortcut = (
126
+ ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
127
+ )
128
+ self.layer = nn.Sequential(
129
+ ResNetConvLayer(in_channels, out_channels, stride=stride),
130
+ ResNetConvLayer(out_channels, out_channels, activation=None),
131
+ )
132
+ self.activation = ACT2FN[activation]
133
+
134
+ def forward(self, hidden_state):
135
+ residual = hidden_state
136
+ hidden_state = self.layer(hidden_state)
137
+ residual = self.shortcut(residual)
138
+ hidden_state += residual
139
+ hidden_state = self.activation(hidden_state)
140
+ return hidden_state
141
+
142
+
143
+ class ResNetBottleNeckLayer(nn.Module):
144
+ """
145
+ A classic ResNet bottleneck layer, composed of a `1x1`, a `3x3` and a second `1x1` convolution.
146
+
147
+ The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3`
148
+ convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`. If
149
+ `downsample_in_bottleneck` is true, the downsampling stride is applied in the first `1x1` convolution instead of the `3x3` convolution.
150
+ """
151
+
152
+ def __init__(
153
+ self,
154
+ in_channels: int,
155
+ out_channels: int,
156
+ stride: int = 1,
157
+ activation: str = "relu",
158
+ reduction: int = 4,
159
+ downsample_in_bottleneck: bool = False,
160
+ ):
161
+ super().__init__()
162
+ should_apply_shortcut = in_channels != out_channels or stride != 1
163
+ reduces_channels = out_channels // reduction
164
+ self.shortcut = (
165
+ ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
166
+ )
167
+ self.layer = nn.Sequential(
168
+ ResNetConvLayer(
169
+ in_channels, reduces_channels, kernel_size=1, stride=stride if downsample_in_bottleneck else 1
170
+ ),
171
+ ResNetConvLayer(reduces_channels, reduces_channels, stride=stride if not downsample_in_bottleneck else 1),
172
+ ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
173
+ )
174
+ self.activation = ACT2FN[activation]
175
+
176
+ def forward(self, hidden_state):
177
+ residual = hidden_state
178
+ hidden_state = self.layer(hidden_state)
179
+ residual = self.shortcut(residual)
180
+ hidden_state += residual
181
+ hidden_state = self.activation(hidden_state)
182
+ return hidden_state
183
+
184
+
185
+ class ResNetStage(nn.Module):
186
+ """
187
+ A ResNet stage composed of stacked layers.
188
+ """
189
+
190
+ def __init__(
191
+ self,
192
+ config: ResNetConfig,
193
+ in_channels: int,
194
+ out_channels: int,
195
+ stride: int = 2,
196
+ depth: int = 2,
197
+ ):
198
+ super().__init__()
199
+
200
+ layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
201
+
202
+ if config.layer_type == "bottleneck":
203
+ first_layer = layer(
204
+ in_channels,
205
+ out_channels,
206
+ stride=stride,
207
+ activation=config.hidden_act,
208
+ downsample_in_bottleneck=config.downsample_in_bottleneck,
209
+ )
210
+ else:
211
+ first_layer = layer(in_channels, out_channels, stride=stride, activation=config.hidden_act)
212
+ self.layers = nn.Sequential(
213
+ first_layer, *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)]
214
+ )
215
+
216
+ def forward(self, input: Tensor) -> Tensor:
217
+ hidden_state = input
218
+ for layer in self.layers:
219
+ hidden_state = layer(hidden_state)
220
+ return hidden_state
221
+
222
+
223
+ class ResNetEncoder(nn.Module):
224
+ def __init__(self, config: ResNetConfig):
225
+ super().__init__()
226
+ self.stages = nn.ModuleList([])
227
+ # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
228
+ self.stages.append(
229
+ ResNetStage(
230
+ config,
231
+ config.embedding_size,
232
+ config.hidden_sizes[0],
233
+ stride=2 if config.downsample_in_first_stage else 1,
234
+ depth=config.depths[0],
235
+ )
236
+ )
237
+ in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
238
+ for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
239
+ self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
240
+
241
+ def forward(
242
+ self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
243
+ ) -> BaseModelOutputWithNoAttention:
244
+ hidden_states = () if output_hidden_states else None
245
+
246
+ for stage_module in self.stages:
247
+ if output_hidden_states:
248
+ hidden_states = hidden_states + (hidden_state,)
249
+
250
+ hidden_state = stage_module(hidden_state)
251
+
252
+ if output_hidden_states:
253
+ hidden_states = hidden_states + (hidden_state,)
254
+
255
+ if not return_dict:
256
+ return tuple(v for v in [hidden_state, hidden_states] if v is not None)
257
+
258
+ return BaseModelOutputWithNoAttention(
259
+ last_hidden_state=hidden_state,
260
+ hidden_states=hidden_states,
261
+ )
262
+
263
+
264
+ class ResNetPreTrainedModel(PreTrainedModel):
265
+ """
266
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
267
+ models.
268
+ """
269
+
270
+ config_class = ResNetConfig
271
+ base_model_prefix = "resnet"
272
+ main_input_name = "pixel_values"
273
+ _no_split_modules = ["ResNetConvLayer", "ResNetShortCut"]
274
+
275
+ def _init_weights(self, module):
276
+ if isinstance(module, nn.Conv2d):
277
+ nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
278
+ # copied from the `reset_parameters` method of `class Linear(Module)` in `torch`.
279
+ elif isinstance(module, nn.Linear):
280
+ nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
281
+ if module.bias is not None:
282
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
283
+ bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
284
+ nn.init.uniform_(module.bias, -bound, bound)
285
+ elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
286
+ nn.init.constant_(module.weight, 1)
287
+ nn.init.constant_(module.bias, 0)
288
+
289
+
290
+ RESNET_START_DOCSTRING = r"""
291
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
292
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
293
+ behavior.
294
+
295
+ Parameters:
296
+ config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
297
+ Initializing with a config file does not load the weights associated with the model, only the
298
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
299
+ """
300
+
301
+ RESNET_INPUTS_DOCSTRING = r"""
302
+ Args:
303
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
304
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
305
+ [`ConvNextImageProcessor.__call__`] for details.
306
+
307
+ output_hidden_states (`bool`, *optional*):
308
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
309
+ more detail.
310
+ return_dict (`bool`, *optional*):
311
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
312
+ """
313
+
314
+
315
+ @add_start_docstrings(
316
+ "The bare ResNet model outputting raw features without any specific head on top.",
317
+ RESNET_START_DOCSTRING,
318
+ )
319
+ class ResNetModel(ResNetPreTrainedModel):
320
+ def __init__(self, config):
321
+ super().__init__(config)
322
+ self.config = config
323
+ self.embedder = ResNetEmbeddings(config)
324
+ self.encoder = ResNetEncoder(config)
325
+ self.pooler = nn.AdaptiveAvgPool2d((1, 1))
326
+ # Initialize weights and apply final processing
327
+ self.post_init()
328
+
329
+ @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
330
+ @add_code_sample_docstrings(
331
+ checkpoint=_CHECKPOINT_FOR_DOC,
332
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
333
+ config_class=_CONFIG_FOR_DOC,
334
+ modality="vision",
335
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
336
+ )
337
+ def forward(
338
+ self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
339
+ ) -> BaseModelOutputWithPoolingAndNoAttention:
340
+ output_hidden_states = (
341
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
342
+ )
343
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
344
+
345
+ embedding_output = self.embedder(pixel_values)
346
+
347
+ encoder_outputs = self.encoder(
348
+ embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
349
+ )
350
+
351
+ last_hidden_state = encoder_outputs[0]
352
+
353
+ pooled_output = self.pooler(last_hidden_state)
354
+
355
+ if not return_dict:
356
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
357
+
358
+ return BaseModelOutputWithPoolingAndNoAttention(
359
+ last_hidden_state=last_hidden_state,
360
+ pooler_output=pooled_output,
361
+ hidden_states=encoder_outputs.hidden_states,
362
+ )
363
+
364
+
365
+ @add_start_docstrings(
366
+ """
367
+ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
368
+ ImageNet.
369
+ """,
370
+ RESNET_START_DOCSTRING,
371
+ )
372
+ class ResNetForImageClassification(ResNetPreTrainedModel):
373
+ def __init__(self, config):
374
+ super().__init__(config)
375
+ self.num_labels = config.num_labels
376
+ self.resnet = ResNetModel(config)
377
+ # classification head
378
+ self.classifier = nn.Sequential(
379
+ nn.Flatten(),
380
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
381
+ )
382
+ # initialize weights and apply final processing
383
+ self.post_init()
384
+
385
+ @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
386
+ @add_code_sample_docstrings(
387
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
388
+ output_type=ImageClassifierOutputWithNoAttention,
389
+ config_class=_CONFIG_FOR_DOC,
390
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
391
+ )
392
+ def forward(
393
+ self,
394
+ pixel_values: Optional[torch.FloatTensor] = None,
395
+ labels: Optional[torch.LongTensor] = None,
396
+ output_hidden_states: Optional[bool] = None,
397
+ return_dict: Optional[bool] = None,
398
+ ) -> ImageClassifierOutputWithNoAttention:
399
+ r"""
400
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
401
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
402
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
403
+ """
404
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
405
+
406
+ outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
407
+
408
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
409
+
410
+ logits = self.classifier(pooled_output)
411
+
412
+ loss = None
413
+
414
+ if labels is not None:
415
+ if self.config.problem_type is None:
416
+ if self.num_labels == 1:
417
+ self.config.problem_type = "regression"
418
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
419
+ self.config.problem_type = "single_label_classification"
420
+ else:
421
+ self.config.problem_type = "multi_label_classification"
422
+ if self.config.problem_type == "regression":
423
+ loss_fct = MSELoss()
424
+ if self.num_labels == 1:
425
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
426
+ else:
427
+ loss = loss_fct(logits, labels)
428
+ elif self.config.problem_type == "single_label_classification":
429
+ loss_fct = CrossEntropyLoss()
430
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
431
+ elif self.config.problem_type == "multi_label_classification":
432
+ loss_fct = BCEWithLogitsLoss()
433
+ loss = loss_fct(logits, labels)
434
+
435
+ if not return_dict:
436
+ output = (logits,) + outputs[2:]
437
+ return (loss,) + output if loss is not None else output
438
+
439
+ return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
440
+
441
+
442
+ @add_start_docstrings(
443
+ """
444
+ ResNet backbone, to be used with frameworks like DETR and MaskFormer.
445
+ """,
446
+ RESNET_START_DOCSTRING,
447
+ )
448
+ class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
449
+ def __init__(self, config):
450
+ super().__init__(config)
451
+ super()._init_backbone(config)
452
+
453
+ self.num_features = [config.embedding_size] + config.hidden_sizes
454
+ self.embedder = ResNetEmbeddings(config)
455
+ self.encoder = ResNetEncoder(config)
456
+
457
+ # initialize weights and apply final processing
458
+ self.post_init()
459
+
460
+ @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
461
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
462
+ def forward(
463
+ self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
464
+ ) -> BackboneOutput:
465
+ """
466
+ Returns:
467
+
468
+ Examples:
469
+
470
+ ```python
471
+ >>> from transformers import AutoImageProcessor, AutoBackbone
472
+ >>> import torch
473
+ >>> from PIL import Image
474
+ >>> import requests
475
+
476
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
477
+ >>> image = Image.open(requests.get(url, stream=True).raw)
478
+
479
+ >>> processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
480
+ >>> model = AutoBackbone.from_pretrained(
481
+ ... "microsoft/resnet-50", out_features=["stage1", "stage2", "stage3", "stage4"]
482
+ ... )
483
+
484
+ >>> inputs = processor(image, return_tensors="pt")
485
+
486
+ >>> outputs = model(**inputs)
487
+ >>> feature_maps = outputs.feature_maps
488
+ >>> list(feature_maps[-1].shape)
489
+ [1, 2048, 7, 7]
490
+ ```"""
491
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
492
+ output_hidden_states = (
493
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
494
+ )
495
+
496
+ embedding_output = self.embedder(pixel_values)
497
+
498
+ outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
499
+
500
+ hidden_states = outputs.hidden_states
501
+
502
+ feature_maps = ()
503
+ for idx, stage in enumerate(self.stage_names):
504
+ if stage in self.out_features:
505
+ feature_maps += (hidden_states[idx],)
506
+
507
+ if not return_dict:
508
+ output = (feature_maps,)
509
+ if output_hidden_states:
510
+ output += (outputs.hidden_states,)
511
+ return output
512
+
513
+ return BackboneOutput(
514
+ feature_maps=feature_maps,
515
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
516
+ attentions=None,
517
+ )
518
+
519
+
520
+ __all__ = ["ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone"]
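The loss selection in `ResNetForImageClassification.forward` keys off `config.problem_type` and the label dtype: integer labels with `num_labels > 1` fall through to `CrossEntropyLoss`. A minimal sketch of that path, using a deliberately tiny, purely illustrative configuration (not a recommended architecture) so it runs on CPU in seconds:

```python
import torch
from transformers import ResNetConfig, ResNetForImageClassification

# illustrative, randomly initialized toy model; sizes chosen only for speed
config = ResNetConfig(
    embedding_size=16,
    hidden_sizes=[16, 32],
    depths=[1, 1],
    layer_type="basic",
    num_labels=3,
)
model = ResNetForImageClassification(config)

pixel_values = torch.randn(2, 3, 64, 64)  # batch of 2 RGB images
labels = torch.tensor([0, 2])             # integer labels -> single_label_classification

outputs = model(pixel_values=pixel_values, labels=labels)
print(outputs.loss)          # cross-entropy loss, ready for outputs.loss.backward()
print(outputs.logits.shape)  # torch.Size([2, 3])
```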
docs/transformers/build/lib/transformers/models/resnet/modeling_tf_resnet.py ADDED
@@ -0,0 +1,596 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TensorFlow ResNet model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import tensorflow as tf
20
+
21
+ from ...activations_tf import ACT2FN
22
+ from ...modeling_tf_outputs import (
23
+ TFBaseModelOutputWithNoAttention,
24
+ TFBaseModelOutputWithPoolingAndNoAttention,
25
+ TFImageClassifierOutputWithNoAttention,
26
+ )
27
+ from ...modeling_tf_utils import (
28
+ TFPreTrainedModel,
29
+ TFSequenceClassificationLoss,
30
+ keras,
31
+ keras_serializable,
32
+ unpack_inputs,
33
+ )
34
+ from ...tf_utils import shape_list
35
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
36
+ from .configuration_resnet import ResNetConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ # General docstring
42
+ _CONFIG_FOR_DOC = "ResNetConfig"
43
+
44
+ # Base docstring
45
+ _CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
46
+ _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
47
+
48
+ # Image classification docstring
49
+ _IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
50
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
51
+
52
+
53
+ class TFResNetConvLayer(keras.layers.Layer):
54
+ def __init__(
55
+ self,
56
+ in_channels: int,
57
+ out_channels: int,
58
+ kernel_size: int = 3,
59
+ stride: int = 1,
60
+ activation: str = "relu",
61
+ **kwargs,
62
+ ) -> None:
63
+ super().__init__(**kwargs)
64
+ self.pad_value = kernel_size // 2
65
+ self.conv = keras.layers.Conv2D(
66
+ out_channels, kernel_size=kernel_size, strides=stride, padding="valid", use_bias=False, name="convolution"
67
+ )
68
+ # Use same default momentum and epsilon as PyTorch equivalent
69
+ self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
70
+ self.activation = ACT2FN[activation] if activation is not None else keras.layers.Activation("linear")
71
+ self.in_channels = in_channels
72
+ self.out_channels = out_channels
73
+
74
+ def convolution(self, hidden_state: tf.Tensor) -> tf.Tensor:
75
+ # Pad to match that done in the PyTorch Conv2D model
76
+ height_pad = width_pad = (self.pad_value, self.pad_value)
77
+ hidden_state = tf.pad(hidden_state, [(0, 0), height_pad, width_pad, (0, 0)])
78
+ hidden_state = self.conv(hidden_state)
79
+ return hidden_state
80
+
81
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
82
+ hidden_state = self.convolution(hidden_state)
83
+ hidden_state = self.normalization(hidden_state, training=training)
84
+ hidden_state = self.activation(hidden_state)
85
+ return hidden_state
86
+
87
+ def build(self, input_shape=None):
88
+ if self.built:
89
+ return
90
+ self.built = True
91
+ if getattr(self, "conv", None) is not None:
92
+ with tf.name_scope(self.conv.name):
93
+ self.conv.build([None, None, None, self.in_channels])
94
+ if getattr(self, "normalization", None) is not None:
95
+ with tf.name_scope(self.normalization.name):
96
+ self.normalization.build([None, None, None, self.out_channels])
97
+
98
+
99
+ class TFResNetEmbeddings(keras.layers.Layer):
100
+ """
101
+ ResNet Embeddings (stem) composed of a single aggressive convolution.
102
+ """
103
+
104
+ def __init__(self, config: ResNetConfig, **kwargs) -> None:
105
+ super().__init__(**kwargs)
106
+ self.embedder = TFResNetConvLayer(
107
+ config.num_channels,
108
+ config.embedding_size,
109
+ kernel_size=7,
110
+ stride=2,
111
+ activation=config.hidden_act,
112
+ name="embedder",
113
+ )
114
+ self.pooler = keras.layers.MaxPool2D(pool_size=3, strides=2, padding="valid", name="pooler")
115
+ self.num_channels = config.num_channels
116
+
117
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
118
+ _, _, _, num_channels = shape_list(pixel_values)
119
+ if tf.executing_eagerly() and num_channels != self.num_channels:
120
+ raise ValueError(
121
+ "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
122
+ )
123
+ hidden_state = pixel_values
124
+ hidden_state = self.embedder(hidden_state)
125
+ hidden_state = tf.pad(hidden_state, [[0, 0], [1, 1], [1, 1], [0, 0]])
126
+ hidden_state = self.pooler(hidden_state)
127
+ return hidden_state
128
+
129
+ def build(self, input_shape=None):
130
+ if self.built:
131
+ return
132
+ self.built = True
133
+ if getattr(self, "embedder", None) is not None:
134
+ with tf.name_scope(self.embedder.name):
135
+ self.embedder.build(None)
136
+ if getattr(self, "pooler", None) is not None:
137
+ with tf.name_scope(self.pooler.name):
138
+ self.pooler.build(None)
139
+
140
+
141
+ class TFResNetShortCut(keras.layers.Layer):
142
+ """
143
+ ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
144
+ downsample the input using `stride=2`.
145
+ """
146
+
147
+ def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs) -> None:
148
+ super().__init__(**kwargs)
149
+ self.convolution = keras.layers.Conv2D(
150
+ out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
151
+ )
152
+ # Use same default momentum and epsilon as PyTorch equivalent
153
+ self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
154
+ self.in_channels = in_channels
155
+ self.out_channels = out_channels
156
+
157
+ def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor:
158
+ hidden_state = x
159
+ hidden_state = self.convolution(hidden_state)
160
+ hidden_state = self.normalization(hidden_state, training=training)
161
+ return hidden_state
162
+
163
+ def build(self, input_shape=None):
164
+ if self.built:
165
+ return
166
+ self.built = True
167
+ if getattr(self, "convolution", None) is not None:
168
+ with tf.name_scope(self.convolution.name):
169
+ self.convolution.build([None, None, None, self.in_channels])
170
+ if getattr(self, "normalization", None) is not None:
171
+ with tf.name_scope(self.normalization.name):
172
+ self.normalization.build([None, None, None, self.out_channels])
173
+
174
+
175
+ class TFResNetBasicLayer(keras.layers.Layer):
176
+ """
177
+ A classic ResNet residual layer, composed of two `3x3` convolutions.
178
+ """
179
+
180
+ def __init__(
181
+ self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", **kwargs
182
+ ) -> None:
183
+ super().__init__(**kwargs)
184
+ should_apply_shortcut = in_channels != out_channels or stride != 1
185
+ self.conv1 = TFResNetConvLayer(in_channels, out_channels, stride=stride, name="layer.0")
186
+ self.conv2 = TFResNetConvLayer(out_channels, out_channels, activation=None, name="layer.1")
187
+ self.shortcut = (
188
+ TFResNetShortCut(in_channels, out_channels, stride=stride, name="shortcut")
189
+ if should_apply_shortcut
190
+ else keras.layers.Activation("linear", name="shortcut")
191
+ )
192
+ self.activation = ACT2FN[activation]
193
+
194
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
195
+ residual = hidden_state
196
+ hidden_state = self.conv1(hidden_state, training=training)
197
+ hidden_state = self.conv2(hidden_state, training=training)
198
+ residual = self.shortcut(residual, training=training)
199
+ hidden_state += residual
200
+ hidden_state = self.activation(hidden_state)
201
+ return hidden_state
202
+
203
+ def build(self, input_shape=None):
204
+ if self.built:
205
+ return
206
+ self.built = True
207
+ if getattr(self, "conv1", None) is not None:
208
+ with tf.name_scope(self.conv1.name):
209
+ self.conv1.build(None)
210
+ if getattr(self, "conv2", None) is not None:
211
+ with tf.name_scope(self.conv2.name):
212
+ self.conv2.build(None)
213
+ if getattr(self, "shortcut", None) is not None:
214
+ with tf.name_scope(self.shortcut.name):
215
+ self.shortcut.build(None)
216
+
217
+
218
+ class TFResNetBottleNeckLayer(keras.layers.Layer):
219
+ """
220
+ A classic ResNet bottleneck layer, composed of a `1x1`, a `3x3` and a second `1x1` convolution.
221
+
222
+ The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3`
223
+ convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`.
224
+ """
225
+
226
+ def __init__(
227
+ self,
228
+ in_channels: int,
229
+ out_channels: int,
230
+ stride: int = 1,
231
+ activation: str = "relu",
232
+ reduction: int = 4,
233
+ **kwargs,
234
+ ) -> None:
235
+ super().__init__(**kwargs)
236
+ should_apply_shortcut = in_channels != out_channels or stride != 1
237
+ reduces_channels = out_channels // reduction
238
+ self.conv0 = TFResNetConvLayer(in_channels, reduces_channels, kernel_size=1, name="layer.0")
239
+ self.conv1 = TFResNetConvLayer(reduces_channels, reduces_channels, stride=stride, name="layer.1")
240
+ self.conv2 = TFResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None, name="layer.2")
241
+ self.shortcut = (
242
+ TFResNetShortCut(in_channels, out_channels, stride=stride, name="shortcut")
243
+ if should_apply_shortcut
244
+ else keras.layers.Activation("linear", name="shortcut")
245
+ )
246
+ self.activation = ACT2FN[activation]
247
+
248
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
249
+ residual = hidden_state
250
+ hidden_state = self.conv0(hidden_state, training=training)
251
+ hidden_state = self.conv1(hidden_state, training=training)
252
+ hidden_state = self.conv2(hidden_state, training=training)
253
+ residual = self.shortcut(residual, training=training)
254
+ hidden_state += residual
255
+ hidden_state = self.activation(hidden_state)
256
+ return hidden_state
257
+
258
+ def build(self, input_shape=None):
259
+ if self.built:
260
+ return
261
+ self.built = True
262
+ if getattr(self, "conv0", None) is not None:
263
+ with tf.name_scope(self.conv0.name):
264
+ self.conv0.build(None)
265
+ if getattr(self, "conv1", None) is not None:
266
+ with tf.name_scope(self.conv1.name):
267
+ self.conv1.build(None)
268
+ if getattr(self, "conv2", None) is not None:
269
+ with tf.name_scope(self.conv2.name):
270
+ self.conv2.build(None)
271
+ if getattr(self, "shortcut", None) is not None:
272
+ with tf.name_scope(self.shortcut.name):
273
+ self.shortcut.build(None)
274
+
275
+
276
+ class TFResNetStage(keras.layers.Layer):
277
+ """
278
+ A ResNet stage composed of stacked layers.
279
+ """
280
+
281
+ def __init__(
282
+ self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
283
+ ) -> None:
284
+ super().__init__(**kwargs)
285
+
286
+ layer = TFResNetBottleNeckLayer if config.layer_type == "bottleneck" else TFResNetBasicLayer
287
+
288
+ layers = [layer(in_channels, out_channels, stride=stride, activation=config.hidden_act, name="layers.0")]
289
+ layers += [
290
+ layer(out_channels, out_channels, activation=config.hidden_act, name=f"layers.{i + 1}")
291
+ for i in range(depth - 1)
292
+ ]
293
+ self.stage_layers = layers
294
+
295
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
296
+ for layer in self.stage_layers:
297
+ hidden_state = layer(hidden_state, training=training)
298
+ return hidden_state
299
+
300
+ def build(self, input_shape=None):
301
+ if self.built:
302
+ return
303
+ self.built = True
304
+ if getattr(self, "stage_layers", None) is not None:
305
+ for layer in self.stage_layers:
306
+ with tf.name_scope(layer.name):
307
+ layer.build(None)
308
+
309
+
310
+ class TFResNetEncoder(keras.layers.Layer):
311
+ def __init__(self, config: ResNetConfig, **kwargs) -> None:
312
+ super().__init__(**kwargs)
313
+ # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
314
+ self.stages = [
315
+ TFResNetStage(
316
+ config,
317
+ config.embedding_size,
318
+ config.hidden_sizes[0],
319
+ stride=2 if config.downsample_in_first_stage else 1,
320
+ depth=config.depths[0],
321
+ name="stages.0",
322
+ )
323
+ ]
324
+ for i, (in_channels, out_channels, depth) in enumerate(
325
+ zip(config.hidden_sizes, config.hidden_sizes[1:], config.depths[1:])
326
+ ):
327
+ self.stages.append(TFResNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i + 1}"))
328
+
329
+ def call(
330
+ self,
331
+ hidden_state: tf.Tensor,
332
+ output_hidden_states: bool = False,
333
+ return_dict: bool = True,
334
+ training: bool = False,
335
+ ) -> TFBaseModelOutputWithNoAttention:
336
+ hidden_states = () if output_hidden_states else None
337
+
338
+ for stage_module in self.stages:
339
+ if output_hidden_states:
340
+ hidden_states = hidden_states + (hidden_state,)
341
+
342
+ hidden_state = stage_module(hidden_state, training=training)
343
+
344
+ if output_hidden_states:
345
+ hidden_states = hidden_states + (hidden_state,)
346
+
347
+ if not return_dict:
348
+ return tuple(v for v in [hidden_state, hidden_states] if v is not None)
349
+
350
+ return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
351
+
352
+ def build(self, input_shape=None):
353
+ if self.built:
354
+ return
355
+ self.built = True
356
+ if getattr(self, "stages", None) is not None:
357
+ for layer in self.stages:
358
+ with tf.name_scope(layer.name):
359
+ layer.build(None)
360
+
361
+
362
+ class TFResNetPreTrainedModel(TFPreTrainedModel):
363
+ """
364
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
365
+ models.
366
+ """
367
+
368
+ config_class = ResNetConfig
369
+ base_model_prefix = "resnet"
370
+ main_input_name = "pixel_values"
371
+
372
+ @property
373
+ def input_signature(self):
374
+ return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
375
+
376
+
377
+ RESNET_START_DOCSTRING = r"""
378
+ This model is a TensorFlow
379
+ [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
380
+ regular TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and
381
+ behavior.
382
+
383
+ Parameters:
384
+ config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
385
+ Initializing with a config file does not load the weights associated with the model, only the
386
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
387
+ """
388
+
389
+
390
+ RESNET_INPUTS_DOCSTRING = r"""
391
+ Args:
392
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
393
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
394
+ [`ConvNextImageProcessor.__call__`] for details.
395
+
396
+ output_hidden_states (`bool`, *optional*):
397
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
398
+ more detail.
399
+ return_dict (`bool`, *optional*):
400
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
401
+ """
402
+
403
+
404
+ @keras_serializable
405
+ class TFResNetMainLayer(keras.layers.Layer):
406
+ config_class = ResNetConfig
407
+
408
+ def __init__(self, config: ResNetConfig, **kwargs) -> None:
409
+ super().__init__(**kwargs)
410
+ self.config = config
411
+ self.embedder = TFResNetEmbeddings(config, name="embedder")
412
+ self.encoder = TFResNetEncoder(config, name="encoder")
413
+ self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True)
414
+
415
+ @unpack_inputs
416
+ def call(
417
+ self,
418
+ pixel_values: tf.Tensor,
419
+ output_hidden_states: Optional[bool] = None,
420
+ return_dict: Optional[bool] = None,
421
+ training: bool = False,
422
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]:
423
+ output_hidden_states = (
424
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
425
+ )
426
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
427
+
428
+ # TF 2.0 image layers can't use NCHW format when running on CPU.
429
+ # We transpose to NHWC format and then transpose back after the full forward pass.
430
+ # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
431
+ pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1])
432
+ embedding_output = self.embedder(pixel_values, training=training)
433
+
434
+ encoder_outputs = self.encoder(
435
+ embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
436
+ )
437
+
438
+ last_hidden_state = encoder_outputs[0]
439
+
440
+ pooled_output = self.pooler(last_hidden_state)
441
+
442
+ # Transpose all the outputs to the NCHW format
443
+ # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
444
+ last_hidden_state = tf.transpose(last_hidden_state, (0, 3, 1, 2))
445
+ pooled_output = tf.transpose(pooled_output, (0, 3, 1, 2))
446
+ hidden_states = ()
447
+ for hidden_state in encoder_outputs[1:]:
448
+ hidden_states = hidden_states + tuple(tf.transpose(h, (0, 3, 1, 2)) for h in hidden_state)
449
+
450
+ if not return_dict:
451
+ return (last_hidden_state, pooled_output) + hidden_states
452
+
453
+ hidden_states = hidden_states if output_hidden_states else None
454
+
455
+ return TFBaseModelOutputWithPoolingAndNoAttention(
456
+ last_hidden_state=last_hidden_state,
457
+ pooler_output=pooled_output,
458
+ hidden_states=hidden_states,
459
+ )
460
+
461
+ def build(self, input_shape=None):
462
+ if self.built:
463
+ return
464
+ self.built = True
465
+ if getattr(self, "embedder", None) is not None:
466
+ with tf.name_scope(self.embedder.name):
467
+ self.embedder.build(None)
468
+ if getattr(self, "encoder", None) is not None:
469
+ with tf.name_scope(self.encoder.name):
470
+ self.encoder.build(None)
471
+
472
+
473
+ @add_start_docstrings(
474
+ "The bare ResNet model outputting raw features without any specific head on top.",
475
+ RESNET_START_DOCSTRING,
476
+ )
477
+ class TFResNetModel(TFResNetPreTrainedModel):
478
+ def __init__(self, config: ResNetConfig, **kwargs) -> None:
479
+ super().__init__(config, **kwargs)
480
+ self.resnet = TFResNetMainLayer(config=config, name="resnet")
481
+
482
+ @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
483
+ @add_code_sample_docstrings(
484
+ checkpoint=_CHECKPOINT_FOR_DOC,
485
+ output_type=TFBaseModelOutputWithPoolingAndNoAttention,
486
+ config_class=_CONFIG_FOR_DOC,
487
+ modality="vision",
488
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
489
+ )
490
+ @unpack_inputs
491
+ def call(
492
+ self,
493
+ pixel_values: tf.Tensor,
494
+ output_hidden_states: Optional[bool] = None,
495
+ return_dict: Optional[bool] = None,
496
+ training: bool = False,
497
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]:
498
+ output_hidden_states = (
499
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
500
+ )
501
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
502
+
503
+ resnet_outputs = self.resnet(
504
+ pixel_values=pixel_values,
505
+ output_hidden_states=output_hidden_states,
506
+ return_dict=return_dict,
507
+ training=training,
508
+ )
509
+ return resnet_outputs
510
+
511
+ def build(self, input_shape=None):
512
+ if self.built:
513
+ return
514
+ self.built = True
515
+ if getattr(self, "resnet", None) is not None:
516
+ with tf.name_scope(self.resnet.name):
517
+ self.resnet.build(None)
518
+
519
+
520
+ @add_start_docstrings(
521
+ """
522
+ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
523
+ ImageNet.
524
+ """,
525
+ RESNET_START_DOCSTRING,
526
+ )
527
+ class TFResNetForImageClassification(TFResNetPreTrainedModel, TFSequenceClassificationLoss):
528
+ def __init__(self, config: ResNetConfig, **kwargs) -> None:
529
+ super().__init__(config, **kwargs)
530
+ self.num_labels = config.num_labels
531
+ self.resnet = TFResNetMainLayer(config, name="resnet")
532
+ # classification head
533
+ self.classifier_layer = (
534
+ keras.layers.Dense(config.num_labels, name="classifier.1")
535
+ if config.num_labels > 0
536
+ else keras.layers.Activation("linear", name="classifier.1")
537
+ )
538
+ self.config = config
539
+
540
+ def classifier(self, x: tf.Tensor) -> tf.Tensor:
541
+ x = keras.layers.Flatten()(x)
542
+ logits = self.classifier_layer(x)
543
+ return logits
544
+
545
+ @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
546
+ @add_code_sample_docstrings(
547
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
548
+ output_type=TFImageClassifierOutputWithNoAttention,
549
+ config_class=_CONFIG_FOR_DOC,
550
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
551
+ )
552
+ @unpack_inputs
553
+ def call(
554
+ self,
555
+ pixel_values: Optional[tf.Tensor] = None,
556
+ labels: Optional[tf.Tensor] = None,
557
+ output_hidden_states: Optional[bool] = None,
558
+ return_dict: Optional[bool] = None,
559
+ training: bool = False,
560
+ ) -> Union[Tuple[tf.Tensor], TFImageClassifierOutputWithNoAttention]:
561
+ r"""
562
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
563
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
564
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
565
+ """
566
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
567
+
568
+ outputs = self.resnet(
569
+ pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
570
+ )
571
+
572
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
573
+
574
+ logits = self.classifier(pooled_output)
575
+
576
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
577
+
578
+ if not return_dict:
579
+ output = (logits,) + outputs[2:]
580
+ return (loss,) + output if loss is not None else output
581
+
582
+ return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
583
+
584
+ def build(self, input_shape=None):
585
+ if self.built:
586
+ return
587
+ self.built = True
588
+ if getattr(self, "resnet", None) is not None:
589
+ with tf.name_scope(self.resnet.name):
590
+ self.resnet.build(None)
591
+ if getattr(self, "classifier_layer", None) is not None:
592
+ with tf.name_scope(self.classifier_layer.name):
593
+ self.classifier_layer.build([None, None, self.config.hidden_sizes[-1]])
594
+
595
+
596
+ __all__ = ["TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel"]
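For symmetry with the PyTorch file, here is a minimal inference sketch for the TensorFlow head defined above, assuming the public `microsoft/resnet-50` checkpoint; the image processor returns channels-first `pixel_values`, which `TFResNetMainLayer` transposes to NHWC internally:

```python
import requests
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

inputs = image_processor(images=image, return_tensors="tf")
outputs = model(**inputs)

# model predicts one of the 1000 ImageNet classes
predicted_class_idx = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print("Predicted class:", model.config.id2label[predicted_class_idx])
```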
docs/transformers/build/lib/transformers/models/roberta/__init__.py ADDED
@@ -0,0 +1,31 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_roberta import *
22
+ from .modeling_flax_roberta import *
23
+ from .modeling_roberta import *
24
+ from .modeling_tf_roberta import *
25
+ from .tokenization_roberta import *
26
+ from .tokenization_roberta_fast import *
27
+ else:
28
+ import sys
29
+
30
+ _file = globals()["__file__"]
31
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
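A small sketch of the lazy-loading behavior this sets up, assuming a standard `transformers` installation: the submodules listed under `TYPE_CHECKING` are only imported the first time one of their attributes is accessed.

```python
import transformers.models.roberta as roberta_pkg

# attribute access on the lazy module triggers the real import of configuration_roberta
RobertaConfig = roberta_pkg.RobertaConfig
print(RobertaConfig(num_hidden_layers=2).num_hidden_layers)  # 2
```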
docs/transformers/build/lib/transformers/models/roberta/configuration_roberta.py ADDED
@@ -0,0 +1,155 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """RoBERTa configuration"""
17
+
18
+ from collections import OrderedDict
19
+ from typing import Mapping
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class RobertaConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`RobertaModel`] or a [`TFRobertaModel`]. It is
32
+ used to instantiate a RoBERTa model according to the specified arguments, defining the model architecture.
33
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa
34
+ [FacebookAI/roberta-base](https://huggingface.co/FacebookAI/roberta-base) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 50265):
42
+ Vocabulary size of the RoBERTa model. Defines the number of different tokens that can be represented by the
43
+ `input_ids` passed when calling [`RobertaModel`] or [`TFRobertaModel`].
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ num_hidden_layers (`int`, *optional*, defaults to 12):
47
+ Number of hidden layers in the Transformer encoder.
48
+ num_attention_heads (`int`, *optional*, defaults to 12):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ intermediate_size (`int`, *optional*, defaults to 3072):
51
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
52
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
53
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
54
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
55
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
57
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout ratio for the attention probabilities.
59
+ max_position_embeddings (`int`, *optional*, defaults to 512):
60
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
61
+ just in case (e.g., 512 or 1024 or 2048).
62
+ type_vocab_size (`int`, *optional*, defaults to 2):
63
+ The vocabulary size of the `token_type_ids` passed when calling [`RobertaModel`] or [`TFRobertaModel`].
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
67
+ The epsilon used by the layer normalization layers.
68
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
69
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
70
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
71
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
72
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
73
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
74
+ is_decoder (`bool`, *optional*, defaults to `False`):
75
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
76
+ use_cache (`bool`, *optional*, defaults to `True`):
77
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
78
+ relevant if `config.is_decoder=True`.
79
+ classifier_dropout (`float`, *optional*):
80
+ The dropout ratio for the classification head.
81
+
82
+ Examples:
83
+
84
+ ```python
85
+ >>> from transformers import RobertaConfig, RobertaModel
86
+
87
+ >>> # Initializing a RoBERTa configuration
88
+ >>> configuration = RobertaConfig()
89
+
90
+ >>> # Initializing a model (with random weights) from the configuration
91
+ >>> model = RobertaModel(configuration)
92
+
93
+ >>> # Accessing the model configuration
94
+ >>> configuration = model.config
95
+ ```"""
96
+
97
+ model_type = "roberta"
98
+
99
+ def __init__(
100
+ self,
101
+ vocab_size=50265,
102
+ hidden_size=768,
103
+ num_hidden_layers=12,
104
+ num_attention_heads=12,
105
+ intermediate_size=3072,
106
+ hidden_act="gelu",
107
+ hidden_dropout_prob=0.1,
108
+ attention_probs_dropout_prob=0.1,
109
+ max_position_embeddings=512,
110
+ type_vocab_size=2,
111
+ initializer_range=0.02,
112
+ layer_norm_eps=1e-12,
113
+ pad_token_id=1,
114
+ bos_token_id=0,
115
+ eos_token_id=2,
116
+ position_embedding_type="absolute",
117
+ use_cache=True,
118
+ classifier_dropout=None,
119
+ **kwargs,
120
+ ):
121
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
122
+
123
+ self.vocab_size = vocab_size
124
+ self.hidden_size = hidden_size
125
+ self.num_hidden_layers = num_hidden_layers
126
+ self.num_attention_heads = num_attention_heads
127
+ self.hidden_act = hidden_act
128
+ self.intermediate_size = intermediate_size
129
+ self.hidden_dropout_prob = hidden_dropout_prob
130
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.type_vocab_size = type_vocab_size
133
+ self.initializer_range = initializer_range
134
+ self.layer_norm_eps = layer_norm_eps
135
+ self.position_embedding_type = position_embedding_type
136
+ self.use_cache = use_cache
137
+ self.classifier_dropout = classifier_dropout
138
+
139
+
140
+ class RobertaOnnxConfig(OnnxConfig):
141
+ @property
142
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
143
+ if self.task == "multiple-choice":
144
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
145
+ else:
146
+ dynamic_axis = {0: "batch", 1: "sequence"}
147
+ return OrderedDict(
148
+ [
149
+ ("input_ids", dynamic_axis),
150
+ ("attention_mask", dynamic_axis),
151
+ ]
152
+ )
153
+
154
+
155
+ __all__ = ["RobertaConfig", "RobertaOnnxConfig"]
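A short sketch of how the two classes above fit together, using an intentionally small, illustrative (untrained) configuration and the default ONNX export task:

```python
from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

# illustrative sizes only; this is not a pretrained checkpoint
config = RobertaConfig(
    hidden_size=256,
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=1024,
)

onnx_config = RobertaOnnxConfig(config)
print(onnx_config.inputs)
# expected: an OrderedDict with dynamic {0: "batch", 1: "sequence"} axes
# for input_ids and attention_mask, as defined by the `inputs` property above
```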
docs/transformers/build/lib/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,177 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert RoBERTa checkpoint."""
16
+
17
+ import argparse
18
+ import pathlib
19
+
20
+ import fairseq
21
+ import torch
22
+ from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
23
+ from fairseq.modules import TransformerSentenceEncoderLayer
24
+ from packaging import version
25
+
26
+ from transformers import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification
27
+ from transformers.models.bert.modeling_bert import (
28
+ BertIntermediate,
29
+ BertLayer,
30
+ BertOutput,
31
+ BertSelfAttention,
32
+ BertSelfOutput,
33
+ )
34
+ from transformers.utils import logging
35
+
36
+
37
+ if version.parse(fairseq.__version__) < version.parse("0.9.0"):
38
+ raise Exception("requires fairseq >= 0.9.0")
39
+
40
+
41
+ logging.set_verbosity_info()
42
+ logger = logging.get_logger(__name__)
43
+
44
+ SAMPLE_TEXT = "Hello world! cécé herlolip"
45
+
46
+
47
+ def convert_roberta_checkpoint_to_pytorch(
48
+ roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
49
+ ):
50
+ """
51
+ Copy/paste/tweak roberta's weights to our BERT structure.
52
+ """
53
+ roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
54
+ roberta.eval() # disable dropout
55
+ roberta_sent_encoder = roberta.model.encoder.sentence_encoder
56
+ config = RobertaConfig(
57
+ vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
58
+ hidden_size=roberta.args.encoder_embed_dim,
59
+ num_hidden_layers=roberta.args.encoder_layers,
60
+ num_attention_heads=roberta.args.encoder_attention_heads,
61
+ intermediate_size=roberta.args.encoder_ffn_embed_dim,
62
+ max_position_embeddings=514,
63
+ type_vocab_size=1,
64
+ layer_norm_eps=1e-5, # PyTorch default used in fairseq
65
+ )
66
+ if classification_head:
67
+ config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
68
+ print("Our BERT config:", config)
69
+
70
+ model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
71
+ model.eval()
72
+
73
+ # Now let's copy all the weights.
74
+ # Embeddings
75
+ model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
76
+ model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
77
+ model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
78
+ model.roberta.embeddings.token_type_embeddings.weight
79
+ ) # just zero them out b/c RoBERTa doesn't use them.
80
+ model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
81
+ model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
82
+
83
+ for i in range(config.num_hidden_layers):
84
+ # Encoder: start of layer
85
+ layer: BertLayer = model.roberta.encoder.layer[i]
86
+ roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
87
+
88
+ # self attention
89
+ self_attn: BertSelfAttention = layer.attention.self
90
+ assert (
91
+ roberta_layer.self_attn.k_proj.weight.data.shape
92
+ == roberta_layer.self_attn.q_proj.weight.data.shape
93
+ == roberta_layer.self_attn.v_proj.weight.data.shape
94
+ == torch.Size((config.hidden_size, config.hidden_size))
95
+ )
96
+
97
+ self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
98
+ self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
99
+ self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
100
+ self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
101
+ self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
102
+ self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
103
+
104
+ # self-attention output
105
+ self_output: BertSelfOutput = layer.attention.output
106
+ assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
107
+ self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
108
+ self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
109
+ self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
110
+ self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
111
+
112
+ # intermediate
113
+ intermediate: BertIntermediate = layer.intermediate
114
+ assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
115
+ intermediate.dense.weight = roberta_layer.fc1.weight
116
+ intermediate.dense.bias = roberta_layer.fc1.bias
117
+
118
+ # output
119
+ bert_output: BertOutput = layer.output
120
+ assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
121
+ bert_output.dense.weight = roberta_layer.fc2.weight
122
+ bert_output.dense.bias = roberta_layer.fc2.bias
123
+ bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
124
+ bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
125
+ # end of layer
126
+
127
+ if classification_head:
128
+ model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
129
+ model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
130
+ model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
131
+ model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
132
+ else:
133
+ # LM Head
134
+ model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
135
+ model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
136
+ model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
137
+ model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
138
+ model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
139
+ model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
140
+
141
+ # Let's check that we get the same results.
142
+ input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
143
+
144
+ our_output = model(input_ids)[0]
145
+ if classification_head:
146
+ their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
147
+ else:
148
+ their_output = roberta.model(input_ids)[0]
149
+ print(our_output.shape, their_output.shape)
150
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
151
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
152
+ success = torch.allclose(our_output, their_output, atol=1e-3)
153
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
154
+ if not success:
155
+ raise Exception("Something went wRoNg")
156
+
157
+ pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
158
+ print(f"Saving model to {pytorch_dump_folder_path}")
159
+ model.save_pretrained(pytorch_dump_folder_path)
160
+
161
+
162
+ if __name__ == "__main__":
163
+ parser = argparse.ArgumentParser()
164
+ # Required parameters
165
+ parser.add_argument(
166
+ "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
167
+ )
168
+ parser.add_argument(
169
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
170
+ )
171
+ parser.add_argument(
172
+ "--classification_head", action="store_true", help="Whether to convert a final classification head."
173
+ )
174
+ args = parser.parse_args()
175
+ convert_roberta_checkpoint_to_pytorch(
176
+ args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
177
+ )
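A hedged usage sketch (editor's addition, not part of the commit): the converter above can be driven either through the argparse entry point or by calling the function directly; the checkpoint and output paths below are placeholders, and fairseq must be installed for the import to succeed.

    from transformers.models.roberta.convert_roberta_original_pytorch_checkpoint_to_pytorch import (
        convert_roberta_checkpoint_to_pytorch,
    )

    convert_roberta_checkpoint_to_pytorch(
        roberta_checkpoint_path="/path/to/fairseq/roberta.base",  # placeholder: directory with the fairseq checkpoint
        pytorch_dump_folder_path="/tmp/roberta-converted",        # placeholder: output folder for save_pretrained
        classification_head=False,  # set True only for checkpoints that carry an "mnli" classification head
    )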
docs/transformers/build/lib/transformers/models/roberta/modeling_flax_roberta.py ADDED
@@ -0,0 +1,1500 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from typing import Callable, Optional, Tuple
16
+
17
+ import flax.linen as nn
18
+ import jax
19
+ import jax.numpy as jnp
20
+ import numpy as np
21
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
22
+ from flax.linen import combine_masks, make_causal_mask
23
+ from flax.linen import partitioning as nn_partitioning
24
+ from flax.linen.attention import dot_product_attention_weights
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+
28
+ from ...modeling_flax_outputs import (
29
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
30
+ FlaxBaseModelOutputWithPooling,
31
+ FlaxBaseModelOutputWithPoolingAndCrossAttentions,
32
+ FlaxCausalLMOutputWithCrossAttentions,
33
+ FlaxMaskedLMOutput,
34
+ FlaxMultipleChoiceModelOutput,
35
+ FlaxQuestionAnsweringModelOutput,
36
+ FlaxSequenceClassifierOutput,
37
+ FlaxTokenClassifierOutput,
38
+ )
39
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
40
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
41
+ from .configuration_roberta import RobertaConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _CHECKPOINT_FOR_DOC = "FacebookAI/roberta-base"
47
+ _CONFIG_FOR_DOC = "RobertaConfig"
48
+
49
+ remat = nn_partitioning.remat
50
+
51
+
52
+ def create_position_ids_from_input_ids(input_ids, padding_idx):
53
+ """
54
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
55
+ are ignored. This is modified from fairseq's `utils.make_positions`.
56
+
57
+ Args:
58
+ input_ids: jnp.ndarray
59
+ padding_idx: int
60
+
61
+ Returns: jnp.ndarray
62
+ """
63
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
64
+ mask = (input_ids != padding_idx).astype("i4")
65
+
66
+ if mask.ndim > 2:
67
+ mask = mask.reshape((-1, mask.shape[-1]))
68
+ incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask
69
+ incremental_indices = incremental_indices.reshape(input_ids.shape)
70
+ else:
71
+ incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask
72
+
73
+ return incremental_indices.astype("i4") + padding_idx
74
+
75
+
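A small worked example (editor's addition, not part of the commit) of the position-id scheme implemented by create_position_ids_from_input_ids above; the token ids are illustrative.

    import jax.numpy as jnp
    from transformers.models.roberta.modeling_flax_roberta import create_position_ids_from_input_ids

    example_ids = jnp.array([[0, 31414, 232, 2, 1, 1]])  # roughly "<s> Hello world </s> <pad> <pad>"
    print(create_position_ids_from_input_ids(example_ids, padding_idx=1))
    # [[2 3 4 5 1 1]] -- padding positions keep index 1, real tokens are numbered from 2 upward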
76
+ ROBERTA_START_DOCSTRING = r"""
77
+
78
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
79
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
80
+
81
+ This model is also a
82
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
83
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
84
+ behavior.
85
+
86
+ Finally, this model supports inherent JAX features such as:
87
+
88
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
89
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
90
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
91
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
92
+
93
+ Parameters:
94
+ config ([`RobertaConfig`]): Model configuration class with all the parameters of the
95
+ model. Initializing with a config file does not load the weights associated with the model, only the
96
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
97
+ """
98
+
99
+ ROBERTA_INPUTS_DOCSTRING = r"""
100
+ Args:
101
+ input_ids (`numpy.ndarray` of shape `({0})`):
102
+ Indices of input sequence tokens in the vocabulary.
103
+
104
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
105
+ [`PreTrainedTokenizer.__call__`] for details.
106
+
107
+ [What are input IDs?](../glossary#input-ids)
108
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
109
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
110
+
111
+ - 1 for tokens that are **not masked**,
112
+ - 0 for tokens that are **masked**.
113
+
114
+ [What are attention masks?](../glossary#attention-mask)
115
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
116
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
117
+ 1]`:
118
+
119
+ - 0 corresponds to a *sentence A* token,
120
+ - 1 corresponds to a *sentence B* token.
121
+
122
+ [What are token type IDs?](../glossary#token-type-ids)
123
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
124
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
125
+ config.max_position_embeddings - 1]`.
126
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
127
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
128
+
129
+ - 1 indicates the head is **not masked**,
130
+ - 0 indicates the head is **masked**.
131
+
132
+ return_dict (`bool`, *optional*):
133
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
134
+ """
135
+
136
+
137
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->Roberta
138
+ class FlaxRobertaEmbeddings(nn.Module):
139
+ """Construct the embeddings from word, position and token_type embeddings."""
140
+
141
+ config: RobertaConfig
142
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
143
+
144
+ def setup(self):
145
+ self.word_embeddings = nn.Embed(
146
+ self.config.vocab_size,
147
+ self.config.hidden_size,
148
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
149
+ dtype=self.dtype,
150
+ )
151
+ self.position_embeddings = nn.Embed(
152
+ self.config.max_position_embeddings,
153
+ self.config.hidden_size,
154
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
155
+ dtype=self.dtype,
156
+ )
157
+ self.token_type_embeddings = nn.Embed(
158
+ self.config.type_vocab_size,
159
+ self.config.hidden_size,
160
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
161
+ dtype=self.dtype,
162
+ )
163
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
164
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
165
+
166
+ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
167
+ # Embed
168
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
169
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
170
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
171
+
172
+ # Sum all embeddings
173
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
174
+
175
+ # Layer Norm
176
+ hidden_states = self.LayerNorm(hidden_states)
177
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
178
+ return hidden_states
179
+
180
+
181
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->Roberta
182
+ class FlaxRobertaSelfAttention(nn.Module):
183
+ config: RobertaConfig
184
+ causal: bool = False
185
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
186
+
187
+ def setup(self):
188
+ self.head_dim = self.config.hidden_size // self.config.num_attention_heads
189
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
190
+ raise ValueError(
191
+ f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
192
+ f" : {self.config.num_attention_heads}"
193
+ )
194
+
195
+ self.query = nn.Dense(
196
+ self.config.hidden_size,
197
+ dtype=self.dtype,
198
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
199
+ )
200
+ self.key = nn.Dense(
201
+ self.config.hidden_size,
202
+ dtype=self.dtype,
203
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
204
+ )
205
+ self.value = nn.Dense(
206
+ self.config.hidden_size,
207
+ dtype=self.dtype,
208
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
209
+ )
210
+
211
+ if self.causal:
212
+ self.causal_mask = make_causal_mask(
213
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
214
+ )
215
+
216
+ def _split_heads(self, hidden_states):
217
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
218
+
219
+ def _merge_heads(self, hidden_states):
220
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
221
+
222
+ @nn.compact
223
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
224
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
225
+ """
226
+ This function takes projected key, value states from a single input token and concatenates the states to cached
227
+ states from previous steps. This function is slightly adapted from the official Flax repository:
228
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
229
+ """
230
+ # detect if we're initializing by absence of existing cache data.
231
+ is_initialized = self.has_variable("cache", "cached_key")
232
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
233
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
234
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
235
+
236
+ if is_initialized:
237
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
238
+ # update key, value caches with our new 1d spatial slices
239
+ cur_index = cache_index.value
240
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
241
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
242
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
243
+ cached_key.value = key
244
+ cached_value.value = value
245
+ num_updated_cache_vectors = query.shape[1]
246
+ cache_index.value = cache_index.value + num_updated_cache_vectors
247
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
248
+ pad_mask = jnp.broadcast_to(
249
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
250
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
251
+ )
252
+ attention_mask = combine_masks(pad_mask, attention_mask)
253
+ return key, value, attention_mask
254
+
255
+ def __call__(
256
+ self,
257
+ hidden_states,
258
+ attention_mask,
259
+ layer_head_mask,
260
+ key_value_states: Optional[jnp.ndarray] = None,
261
+ init_cache: bool = False,
262
+ deterministic=True,
263
+ output_attentions: bool = False,
264
+ ):
265
+ # if key_value_states are provided this layer is used as a cross-attention layer
266
+ # for the decoder
267
+ is_cross_attention = key_value_states is not None
268
+ batch_size = hidden_states.shape[0]
269
+
270
+ # get query proj
271
+ query_states = self.query(hidden_states)
272
+ # get key, value proj
273
+ if is_cross_attention:
274
+ # cross_attentions
275
+ key_states = self.key(key_value_states)
276
+ value_states = self.value(key_value_states)
277
+ else:
278
+ # self_attention
279
+ key_states = self.key(hidden_states)
280
+ value_states = self.value(hidden_states)
281
+
282
+ query_states = self._split_heads(query_states)
283
+ key_states = self._split_heads(key_states)
284
+ value_states = self._split_heads(value_states)
285
+
286
+ # handle cache prepare causal attention mask
287
+ if self.causal:
288
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
289
+ if self.has_variable("cache", "cached_key"):
290
+ mask_shift = self.variables["cache"]["cache_index"]
291
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
292
+ causal_mask = lax.dynamic_slice(
293
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
294
+ )
295
+ else:
296
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
297
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
298
+
299
+ # combine masks if needed
300
+ if attention_mask is not None and self.causal:
301
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
302
+ attention_mask = combine_masks(attention_mask, causal_mask)
303
+ elif self.causal:
304
+ attention_mask = causal_mask
305
+ elif attention_mask is not None:
306
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
307
+
308
+ # During fast autoregressive decoding, we feed one position at a time,
309
+ # and cache the keys and values step by step.
310
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
311
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
312
+ key_states, value_states, query_states, attention_mask
313
+ )
314
+
315
+ # Convert the boolean attention mask to an attention bias.
316
+ if attention_mask is not None:
317
+ # attention mask in the form of attention bias
318
+ attention_bias = lax.select(
319
+ attention_mask > 0,
320
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
321
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
322
+ )
323
+ else:
324
+ attention_bias = None
325
+
326
+ dropout_rng = None
327
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
328
+ dropout_rng = self.make_rng("dropout")
329
+
330
+ attn_weights = dot_product_attention_weights(
331
+ query_states,
332
+ key_states,
333
+ bias=attention_bias,
334
+ dropout_rng=dropout_rng,
335
+ dropout_rate=self.config.attention_probs_dropout_prob,
336
+ broadcast_dropout=True,
337
+ deterministic=deterministic,
338
+ dtype=self.dtype,
339
+ precision=None,
340
+ )
341
+
342
+ # Mask heads if we want to
343
+ if layer_head_mask is not None:
344
+ attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
345
+
346
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
347
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
348
+
349
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
350
+ return outputs
351
+
352
+
353
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->Roberta
354
+ class FlaxRobertaSelfOutput(nn.Module):
355
+ config: RobertaConfig
356
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
357
+
358
+ def setup(self):
359
+ self.dense = nn.Dense(
360
+ self.config.hidden_size,
361
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
362
+ dtype=self.dtype,
363
+ )
364
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
365
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
366
+
367
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
368
+ hidden_states = self.dense(hidden_states)
369
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
370
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
371
+ return hidden_states
372
+
373
+
374
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->Roberta
375
+ class FlaxRobertaAttention(nn.Module):
376
+ config: RobertaConfig
377
+ causal: bool = False
378
+ dtype: jnp.dtype = jnp.float32
379
+
380
+ def setup(self):
381
+ self.self = FlaxRobertaSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
382
+ self.output = FlaxRobertaSelfOutput(self.config, dtype=self.dtype)
383
+
384
+ def __call__(
385
+ self,
386
+ hidden_states,
387
+ attention_mask,
388
+ layer_head_mask,
389
+ key_value_states=None,
390
+ init_cache=False,
391
+ deterministic=True,
392
+ output_attentions: bool = False,
393
+ ):
394
+ # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
395
+ # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
396
+ # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
397
+ attn_outputs = self.self(
398
+ hidden_states,
399
+ attention_mask,
400
+ layer_head_mask=layer_head_mask,
401
+ key_value_states=key_value_states,
402
+ init_cache=init_cache,
403
+ deterministic=deterministic,
404
+ output_attentions=output_attentions,
405
+ )
406
+ attn_output = attn_outputs[0]
407
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
408
+
409
+ outputs = (hidden_states,)
410
+
411
+ if output_attentions:
412
+ outputs += (attn_outputs[1],)
413
+
414
+ return outputs
415
+
416
+
417
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->Roberta
418
+ class FlaxRobertaIntermediate(nn.Module):
419
+ config: RobertaConfig
420
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
421
+
422
+ def setup(self):
423
+ self.dense = nn.Dense(
424
+ self.config.intermediate_size,
425
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
426
+ dtype=self.dtype,
427
+ )
428
+ self.activation = ACT2FN[self.config.hidden_act]
429
+
430
+ def __call__(self, hidden_states):
431
+ hidden_states = self.dense(hidden_states)
432
+ hidden_states = self.activation(hidden_states)
433
+ return hidden_states
434
+
435
+
436
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->Roberta
437
+ class FlaxRobertaOutput(nn.Module):
438
+ config: RobertaConfig
439
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
440
+
441
+ def setup(self):
442
+ self.dense = nn.Dense(
443
+ self.config.hidden_size,
444
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
445
+ dtype=self.dtype,
446
+ )
447
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
448
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
449
+
450
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
451
+ hidden_states = self.dense(hidden_states)
452
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
453
+ hidden_states = self.LayerNorm(hidden_states + attention_output)
454
+ return hidden_states
455
+
456
+
457
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->Roberta
458
+ class FlaxRobertaLayer(nn.Module):
459
+ config: RobertaConfig
460
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
461
+
462
+ def setup(self):
463
+ self.attention = FlaxRobertaAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
464
+ self.intermediate = FlaxRobertaIntermediate(self.config, dtype=self.dtype)
465
+ self.output = FlaxRobertaOutput(self.config, dtype=self.dtype)
466
+ if self.config.add_cross_attention:
467
+ self.crossattention = FlaxRobertaAttention(self.config, causal=False, dtype=self.dtype)
468
+
469
+ def __call__(
470
+ self,
471
+ hidden_states,
472
+ attention_mask,
473
+ layer_head_mask,
474
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
475
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
476
+ init_cache: bool = False,
477
+ deterministic: bool = True,
478
+ output_attentions: bool = False,
479
+ ):
480
+ # Self Attention
481
+ attention_outputs = self.attention(
482
+ hidden_states,
483
+ attention_mask,
484
+ layer_head_mask=layer_head_mask,
485
+ init_cache=init_cache,
486
+ deterministic=deterministic,
487
+ output_attentions=output_attentions,
488
+ )
489
+ attention_output = attention_outputs[0]
490
+
491
+ # Cross-Attention Block
492
+ if encoder_hidden_states is not None:
493
+ cross_attention_outputs = self.crossattention(
494
+ attention_output,
495
+ attention_mask=encoder_attention_mask,
496
+ layer_head_mask=layer_head_mask,
497
+ key_value_states=encoder_hidden_states,
498
+ deterministic=deterministic,
499
+ output_attentions=output_attentions,
500
+ )
501
+ attention_output = cross_attention_outputs[0]
502
+
503
+ hidden_states = self.intermediate(attention_output)
504
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
505
+
506
+ outputs = (hidden_states,)
507
+
508
+ if output_attentions:
509
+ outputs += (attention_outputs[1],)
510
+ if encoder_hidden_states is not None:
511
+ outputs += (cross_attention_outputs[1],)
512
+ return outputs
513
+
514
+
515
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->Roberta
516
+ class FlaxRobertaLayerCollection(nn.Module):
517
+ config: RobertaConfig
518
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
519
+ gradient_checkpointing: bool = False
520
+
521
+ def setup(self):
522
+ if self.gradient_checkpointing:
523
+ FlaxRobertaCheckpointLayer = remat(FlaxRobertaLayer, static_argnums=(5, 6, 7))
524
+ self.layers = [
525
+ FlaxRobertaCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
526
+ for i in range(self.config.num_hidden_layers)
527
+ ]
528
+ else:
529
+ self.layers = [
530
+ FlaxRobertaLayer(self.config, name=str(i), dtype=self.dtype)
531
+ for i in range(self.config.num_hidden_layers)
532
+ ]
533
+
534
+ def __call__(
535
+ self,
536
+ hidden_states,
537
+ attention_mask,
538
+ head_mask,
539
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
540
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
541
+ init_cache: bool = False,
542
+ deterministic: bool = True,
543
+ output_attentions: bool = False,
544
+ output_hidden_states: bool = False,
545
+ return_dict: bool = True,
546
+ ):
547
+ all_attentions = () if output_attentions else None
548
+ all_hidden_states = () if output_hidden_states else None
549
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
550
+
551
+ # Check if head_mask has a correct number of layers specified if desired
552
+ if head_mask is not None:
553
+ if head_mask.shape[0] != (len(self.layers)):
554
+ raise ValueError(
555
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
556
+ f" {head_mask.shape[0]}."
557
+ )
558
+
559
+ for i, layer in enumerate(self.layers):
560
+ if output_hidden_states:
561
+ all_hidden_states += (hidden_states,)
562
+
563
+ layer_outputs = layer(
564
+ hidden_states,
565
+ attention_mask,
566
+ head_mask[i] if head_mask is not None else None,
567
+ encoder_hidden_states,
568
+ encoder_attention_mask,
569
+ init_cache,
570
+ deterministic,
571
+ output_attentions,
572
+ )
573
+
574
+ hidden_states = layer_outputs[0]
575
+
576
+ if output_attentions:
577
+ all_attentions += (layer_outputs[1],)
578
+
579
+ if encoder_hidden_states is not None:
580
+ all_cross_attentions += (layer_outputs[2],)
581
+
582
+ if output_hidden_states:
583
+ all_hidden_states += (hidden_states,)
584
+
585
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
586
+
587
+ if not return_dict:
588
+ return tuple(v for v in outputs if v is not None)
589
+
590
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
591
+ last_hidden_state=hidden_states,
592
+ hidden_states=all_hidden_states,
593
+ attentions=all_attentions,
594
+ cross_attentions=all_cross_attentions,
595
+ )
596
+
597
+
598
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->Roberta
599
+ class FlaxRobertaEncoder(nn.Module):
600
+ config: RobertaConfig
601
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
602
+ gradient_checkpointing: bool = False
603
+
604
+ def setup(self):
605
+ self.layer = FlaxRobertaLayerCollection(
606
+ self.config,
607
+ dtype=self.dtype,
608
+ gradient_checkpointing=self.gradient_checkpointing,
609
+ )
610
+
611
+ def __call__(
612
+ self,
613
+ hidden_states,
614
+ attention_mask,
615
+ head_mask,
616
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
617
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
618
+ init_cache: bool = False,
619
+ deterministic: bool = True,
620
+ output_attentions: bool = False,
621
+ output_hidden_states: bool = False,
622
+ return_dict: bool = True,
623
+ ):
624
+ return self.layer(
625
+ hidden_states,
626
+ attention_mask,
627
+ head_mask=head_mask,
628
+ encoder_hidden_states=encoder_hidden_states,
629
+ encoder_attention_mask=encoder_attention_mask,
630
+ init_cache=init_cache,
631
+ deterministic=deterministic,
632
+ output_attentions=output_attentions,
633
+ output_hidden_states=output_hidden_states,
634
+ return_dict=return_dict,
635
+ )
636
+
637
+
638
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->Roberta
639
+ class FlaxRobertaPooler(nn.Module):
640
+ config: RobertaConfig
641
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
642
+
643
+ def setup(self):
644
+ self.dense = nn.Dense(
645
+ self.config.hidden_size,
646
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
647
+ dtype=self.dtype,
648
+ )
649
+
650
+ def __call__(self, hidden_states):
651
+ cls_hidden_state = hidden_states[:, 0]
652
+ cls_hidden_state = self.dense(cls_hidden_state)
653
+ return nn.tanh(cls_hidden_state)
654
+
655
+
656
+ class FlaxRobertaLMHead(nn.Module):
657
+ config: RobertaConfig
658
+ dtype: jnp.dtype = jnp.float32
659
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
660
+
661
+ def setup(self):
662
+ self.dense = nn.Dense(
663
+ self.config.hidden_size,
664
+ dtype=self.dtype,
665
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
666
+ )
667
+ self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
668
+ self.decoder = nn.Dense(
669
+ self.config.vocab_size,
670
+ dtype=self.dtype,
671
+ use_bias=False,
672
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
673
+ )
674
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
675
+
676
+ def __call__(self, hidden_states, shared_embedding=None):
677
+ hidden_states = self.dense(hidden_states)
678
+ hidden_states = ACT2FN["gelu"](hidden_states)
679
+ hidden_states = self.layer_norm(hidden_states)
680
+
681
+ if shared_embedding is not None:
682
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
683
+ else:
684
+ hidden_states = self.decoder(hidden_states)
685
+
686
+ bias = jnp.asarray(self.bias, self.dtype)
687
+ hidden_states += bias
688
+ return hidden_states
689
+
690
+
691
+ class FlaxRobertaClassificationHead(nn.Module):
692
+ config: RobertaConfig
693
+ dtype: jnp.dtype = jnp.float32
694
+
695
+ def setup(self):
696
+ self.dense = nn.Dense(
697
+ self.config.hidden_size,
698
+ dtype=self.dtype,
699
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
700
+ )
701
+ classifier_dropout = (
702
+ self.config.classifier_dropout
703
+ if self.config.classifier_dropout is not None
704
+ else self.config.hidden_dropout_prob
705
+ )
706
+ self.dropout = nn.Dropout(rate=classifier_dropout)
707
+ self.out_proj = nn.Dense(
708
+ self.config.num_labels,
709
+ dtype=self.dtype,
710
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
711
+ )
712
+
713
+ def __call__(self, hidden_states, deterministic=True):
714
+ hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
715
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
716
+ hidden_states = self.dense(hidden_states)
717
+ hidden_states = nn.tanh(hidden_states)
718
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
719
+ hidden_states = self.out_proj(hidden_states)
720
+ return hidden_states
721
+
722
+
723
+ class FlaxRobertaPreTrainedModel(FlaxPreTrainedModel):
724
+ """
725
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
726
+ models.
727
+ """
728
+
729
+ config_class = RobertaConfig
730
+ base_model_prefix = "roberta"
731
+
732
+ module_class: nn.Module = None
733
+
734
+ def __init__(
735
+ self,
736
+ config: RobertaConfig,
737
+ input_shape: Tuple = (1, 1),
738
+ seed: int = 0,
739
+ dtype: jnp.dtype = jnp.float32,
740
+ _do_init: bool = True,
741
+ gradient_checkpointing: bool = False,
742
+ **kwargs,
743
+ ):
744
+ module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs)
745
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
746
+
747
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing
748
+ def enable_gradient_checkpointing(self):
749
+ self._module = self.module_class(
750
+ config=self.config,
751
+ dtype=self.dtype,
752
+ gradient_checkpointing=True,
753
+ )
754
+
755
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
756
+ # init input tensors
757
+ input_ids = jnp.zeros(input_shape, dtype="i4")
758
+ token_type_ids = jnp.ones_like(input_ids)
759
+ position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)
760
+ attention_mask = jnp.ones_like(input_ids)
761
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
762
+
763
+ params_rng, dropout_rng = jax.random.split(rng)
764
+ rngs = {"params": params_rng, "dropout": dropout_rng}
765
+
766
+ if self.config.add_cross_attention:
767
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
768
+ encoder_attention_mask = attention_mask
769
+ module_init_outputs = self.module.init(
770
+ rngs,
771
+ input_ids,
772
+ attention_mask,
773
+ token_type_ids,
774
+ position_ids,
775
+ head_mask,
776
+ encoder_hidden_states,
777
+ encoder_attention_mask,
778
+ return_dict=False,
779
+ )
780
+ else:
781
+ module_init_outputs = self.module.init(
782
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
783
+ )
784
+
785
+ random_params = module_init_outputs["params"]
786
+
787
+ if params is not None:
788
+ random_params = flatten_dict(unfreeze(random_params))
789
+ params = flatten_dict(unfreeze(params))
790
+ for missing_key in self._missing_keys:
791
+ params[missing_key] = random_params[missing_key]
792
+ self._missing_keys = set()
793
+ return freeze(unflatten_dict(params))
794
+ else:
795
+ return random_params
796
+
797
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
798
+ def init_cache(self, batch_size, max_length):
799
+ r"""
800
+ Args:
801
+ batch_size (`int`):
802
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
803
+ max_length (`int`):
804
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
805
+ cache.
806
+ """
807
+ # init input variables to retrieve cache
808
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
809
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
810
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
811
+
812
+ init_variables = self.module.init(
813
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
814
+ )
815
+ return unfreeze(init_variables["cache"])
816
+
817
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
818
+ def __call__(
819
+ self,
820
+ input_ids,
821
+ attention_mask=None,
822
+ token_type_ids=None,
823
+ position_ids=None,
824
+ head_mask=None,
825
+ encoder_hidden_states=None,
826
+ encoder_attention_mask=None,
827
+ params: dict = None,
828
+ dropout_rng: jax.random.PRNGKey = None,
829
+ train: bool = False,
830
+ output_attentions: Optional[bool] = None,
831
+ output_hidden_states: Optional[bool] = None,
832
+ return_dict: Optional[bool] = None,
833
+ past_key_values: dict = None,
834
+ ):
835
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
836
+ output_hidden_states = (
837
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
838
+ )
839
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
840
+
841
+ # init input tensors if not passed
842
+ if token_type_ids is None:
843
+ token_type_ids = jnp.zeros_like(input_ids)
844
+
845
+ if position_ids is None:
846
+ position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)
847
+
848
+ if attention_mask is None:
849
+ attention_mask = jnp.ones_like(input_ids)
850
+
851
+ if head_mask is None:
852
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
853
+
854
+ # Handle any PRNG if needed
855
+ rngs = {}
856
+ if dropout_rng is not None:
857
+ rngs["dropout"] = dropout_rng
858
+
859
+ inputs = {"params": params or self.params}
860
+
861
+ if self.config.add_cross_attention:
862
+ # if past_key_values are passed then the cache is already initialized; a private flag init_cache has to be passed
863
+ # down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be
864
+ # changed by FlaxRobertaAttention module
865
+ if past_key_values:
866
+ inputs["cache"] = past_key_values
867
+ mutable = ["cache"]
868
+ else:
869
+ mutable = False
870
+
871
+ outputs = self.module.apply(
872
+ inputs,
873
+ jnp.array(input_ids, dtype="i4"),
874
+ jnp.array(attention_mask, dtype="i4"),
875
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
876
+ position_ids=jnp.array(position_ids, dtype="i4"),
877
+ head_mask=jnp.array(head_mask, dtype="i4"),
878
+ encoder_hidden_states=encoder_hidden_states,
879
+ encoder_attention_mask=encoder_attention_mask,
880
+ deterministic=not train,
881
+ output_attentions=output_attentions,
882
+ output_hidden_states=output_hidden_states,
883
+ return_dict=return_dict,
884
+ rngs=rngs,
885
+ mutable=mutable,
886
+ )
887
+
888
+ # add updated cache to model output
889
+ if past_key_values is not None and return_dict:
890
+ outputs, past_key_values = outputs
891
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
892
+ return outputs
893
+ elif past_key_values is not None and not return_dict:
894
+ outputs, past_key_values = outputs
895
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
896
+
897
+ else:
898
+ outputs = self.module.apply(
899
+ inputs,
900
+ jnp.array(input_ids, dtype="i4"),
901
+ jnp.array(attention_mask, dtype="i4"),
902
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
903
+ position_ids=jnp.array(position_ids, dtype="i4"),
904
+ head_mask=jnp.array(head_mask, dtype="i4"),
905
+ deterministic=not train,
906
+ output_attentions=output_attentions,
907
+ output_hidden_states=output_hidden_states,
908
+ return_dict=return_dict,
909
+ rngs=rngs,
910
+ )
911
+
912
+ return outputs
913
+
914
+
915
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertModule with Bert->Roberta
916
+ class FlaxRobertaModule(nn.Module):
917
+ config: RobertaConfig
918
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
919
+ add_pooling_layer: bool = True
920
+ gradient_checkpointing: bool = False
921
+
922
+ def setup(self):
923
+ self.embeddings = FlaxRobertaEmbeddings(self.config, dtype=self.dtype)
924
+ self.encoder = FlaxRobertaEncoder(
925
+ self.config,
926
+ dtype=self.dtype,
927
+ gradient_checkpointing=self.gradient_checkpointing,
928
+ )
929
+ self.pooler = FlaxRobertaPooler(self.config, dtype=self.dtype)
930
+
931
+ def __call__(
932
+ self,
933
+ input_ids,
934
+ attention_mask,
935
+ token_type_ids: Optional[jnp.ndarray] = None,
936
+ position_ids: Optional[jnp.ndarray] = None,
937
+ head_mask: Optional[jnp.ndarray] = None,
938
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
939
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
940
+ init_cache: bool = False,
941
+ deterministic: bool = True,
942
+ output_attentions: bool = False,
943
+ output_hidden_states: bool = False,
944
+ return_dict: bool = True,
945
+ ):
946
+ # make sure `token_type_ids` is correctly initialized when not passed
947
+ if token_type_ids is None:
948
+ token_type_ids = jnp.zeros_like(input_ids)
949
+
950
+ # make sure `position_ids` is correctly initialized when not passed
951
+ if position_ids is None:
952
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
953
+
954
+ hidden_states = self.embeddings(
955
+ input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
956
+ )
957
+ outputs = self.encoder(
958
+ hidden_states,
959
+ attention_mask,
960
+ head_mask=head_mask,
961
+ deterministic=deterministic,
962
+ encoder_hidden_states=encoder_hidden_states,
963
+ encoder_attention_mask=encoder_attention_mask,
964
+ init_cache=init_cache,
965
+ output_attentions=output_attentions,
966
+ output_hidden_states=output_hidden_states,
967
+ return_dict=return_dict,
968
+ )
969
+ hidden_states = outputs[0]
970
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
971
+
972
+ if not return_dict:
973
+ # if pooled is None, don't return it
974
+ if pooled is None:
975
+ return (hidden_states,) + outputs[1:]
976
+ return (hidden_states, pooled) + outputs[1:]
977
+
978
+ return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
979
+ last_hidden_state=hidden_states,
980
+ pooler_output=pooled,
981
+ hidden_states=outputs.hidden_states,
982
+ attentions=outputs.attentions,
983
+ cross_attentions=outputs.cross_attentions,
984
+ )
985
+
986
+
987
+ @add_start_docstrings(
988
+ "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
989
+ ROBERTA_START_DOCSTRING,
990
+ )
991
+ class FlaxRobertaModel(FlaxRobertaPreTrainedModel):
992
+ module_class = FlaxRobertaModule
993
+
994
+
995
+ append_call_sample_docstring(FlaxRobertaModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)
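A hedged usage sketch (editor's addition, not part of the commit): feature extraction with the Flax model defined above, assuming the FacebookAI/roberta-base checkpoint referenced by _CHECKPOINT_FOR_DOC is reachable.

    from transformers import AutoTokenizer, FlaxRobertaModel

    tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
    model = FlaxRobertaModel.from_pretrained("FacebookAI/roberta-base")

    inputs = tokenizer("Hello world!", return_tensors="jax")
    outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size), e.g. (1, 5, 768)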
996
+
997
+
998
+ class FlaxRobertaForMaskedLMModule(nn.Module):
999
+ config: RobertaConfig
1000
+ dtype: jnp.dtype = jnp.float32
1001
+ gradient_checkpointing: bool = False
1002
+
1003
+ def setup(self):
1004
+ self.roberta = FlaxRobertaModule(
1005
+ config=self.config,
1006
+ add_pooling_layer=False,
1007
+ dtype=self.dtype,
1008
+ gradient_checkpointing=self.gradient_checkpointing,
1009
+ )
1010
+ self.lm_head = FlaxRobertaLMHead(config=self.config, dtype=self.dtype)
1011
+
1012
+ def __call__(
1013
+ self,
1014
+ input_ids,
1015
+ attention_mask,
1016
+ token_type_ids,
1017
+ position_ids,
1018
+ head_mask,
1019
+ deterministic: bool = True,
1020
+ output_attentions: bool = False,
1021
+ output_hidden_states: bool = False,
1022
+ return_dict: bool = True,
1023
+ ):
1024
+ # Model
1025
+ outputs = self.roberta(
1026
+ input_ids,
1027
+ attention_mask,
1028
+ token_type_ids,
1029
+ position_ids,
1030
+ head_mask,
1031
+ deterministic=deterministic,
1032
+ output_attentions=output_attentions,
1033
+ output_hidden_states=output_hidden_states,
1034
+ return_dict=return_dict,
1035
+ )
1036
+
1037
+ hidden_states = outputs[0]
1038
+ if self.config.tie_word_embeddings:
1039
+ shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1040
+ else:
1041
+ shared_embedding = None
1042
+
1043
+ # Compute the prediction scores
1044
+ logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)
1045
+
1046
+ if not return_dict:
1047
+ return (logits,) + outputs[1:]
1048
+
1049
+ return FlaxMaskedLMOutput(
1050
+ logits=logits,
1051
+ hidden_states=outputs.hidden_states,
1052
+ attentions=outputs.attentions,
1053
+ )
1054
+
1055
+
1056
+ @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING)
1057
+ class FlaxRobertaForMaskedLM(FlaxRobertaPreTrainedModel):
1058
+ module_class = FlaxRobertaForMaskedLMModule
1059
+
1060
+
1061
+ append_call_sample_docstring(
1062
+ FlaxRobertaForMaskedLM,
1063
+ _CHECKPOINT_FOR_DOC,
1064
+ FlaxBaseModelOutputWithPooling,
1065
+ _CONFIG_FOR_DOC,
1066
+ mask="<mask>",
1067
+ )
1068
+
1069
+
1070
+ class FlaxRobertaForSequenceClassificationModule(nn.Module):
1071
+ config: RobertaConfig
1072
+ dtype: jnp.dtype = jnp.float32
1073
+ gradient_checkpointing: bool = False
1074
+
1075
+ def setup(self):
1076
+ self.roberta = FlaxRobertaModule(
1077
+ config=self.config,
1078
+ dtype=self.dtype,
1079
+ add_pooling_layer=False,
1080
+ gradient_checkpointing=self.gradient_checkpointing,
1081
+ )
1082
+ self.classifier = FlaxRobertaClassificationHead(config=self.config, dtype=self.dtype)
1083
+
1084
+ def __call__(
1085
+ self,
1086
+ input_ids,
1087
+ attention_mask,
1088
+ token_type_ids,
1089
+ position_ids,
1090
+ head_mask,
1091
+ deterministic: bool = True,
1092
+ output_attentions: bool = False,
1093
+ output_hidden_states: bool = False,
1094
+ return_dict: bool = True,
1095
+ ):
1096
+ # Model
1097
+ outputs = self.roberta(
1098
+ input_ids,
1099
+ attention_mask,
1100
+ token_type_ids,
1101
+ position_ids,
1102
+ head_mask,
1103
+ deterministic=deterministic,
1104
+ output_attentions=output_attentions,
1105
+ output_hidden_states=output_hidden_states,
1106
+ return_dict=return_dict,
1107
+ )
1108
+
1109
+ sequence_output = outputs[0]
1110
+ logits = self.classifier(sequence_output, deterministic=deterministic)
1111
+
1112
+ if not return_dict:
1113
+ return (logits,) + outputs[1:]
1114
+
1115
+ return FlaxSequenceClassifierOutput(
1116
+ logits=logits,
1117
+ hidden_states=outputs.hidden_states,
1118
+ attentions=outputs.attentions,
1119
+ )
1120
+
1121
+
1122
+ @add_start_docstrings(
1123
+ """
1124
+ Roberta Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1125
+ pooled output) e.g. for GLUE tasks.
1126
+ """,
1127
+ ROBERTA_START_DOCSTRING,
1128
+ )
1129
+ class FlaxRobertaForSequenceClassification(FlaxRobertaPreTrainedModel):
1130
+ module_class = FlaxRobertaForSequenceClassificationModule
1131
+
1132
+
1133
+ append_call_sample_docstring(
1134
+ FlaxRobertaForSequenceClassification,
1135
+ _CHECKPOINT_FOR_DOC,
1136
+ FlaxSequenceClassifierOutput,
1137
+ _CONFIG_FOR_DOC,
1138
+ )
1139
+
1140
+
1141
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForMultipleChoiceModule with Bert->Roberta, with self.bert->self.roberta
1142
+ class FlaxRobertaForMultipleChoiceModule(nn.Module):
1143
+ config: RobertaConfig
1144
+ dtype: jnp.dtype = jnp.float32
1145
+ gradient_checkpointing: bool = False
1146
+
1147
+ def setup(self):
1148
+ self.roberta = FlaxRobertaModule(
1149
+ config=self.config,
1150
+ dtype=self.dtype,
1151
+ gradient_checkpointing=self.gradient_checkpointing,
1152
+ )
1153
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
1154
+ self.classifier = nn.Dense(1, dtype=self.dtype)
1155
+
1156
+ def __call__(
1157
+ self,
1158
+ input_ids,
1159
+ attention_mask,
1160
+ token_type_ids,
1161
+ position_ids,
1162
+ head_mask,
1163
+ deterministic: bool = True,
1164
+ output_attentions: bool = False,
1165
+ output_hidden_states: bool = False,
1166
+ return_dict: bool = True,
1167
+ ):
1168
+ num_choices = input_ids.shape[1]
1169
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
1170
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
1171
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
1172
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
1173
+
1174
+ # Model
1175
+ outputs = self.roberta(
1176
+ input_ids,
1177
+ attention_mask,
1178
+ token_type_ids,
1179
+ position_ids,
1180
+ head_mask,
1181
+ deterministic=deterministic,
1182
+ output_attentions=output_attentions,
1183
+ output_hidden_states=output_hidden_states,
1184
+ return_dict=return_dict,
1185
+ )
1186
+
1187
+ pooled_output = outputs[1]
1188
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
1189
+ logits = self.classifier(pooled_output)
1190
+
1191
+ reshaped_logits = logits.reshape(-1, num_choices)
1192
+
1193
+ if not return_dict:
1194
+ return (reshaped_logits,) + outputs[2:]
1195
+
1196
+ return FlaxMultipleChoiceModelOutput(
1197
+ logits=reshaped_logits,
1198
+ hidden_states=outputs.hidden_states,
1199
+ attentions=outputs.attentions,
1200
+ )
1201
+
1202
+
1203
+ @add_start_docstrings(
1204
+ """
1205
+ Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1206
+ softmax) e.g. for RocStories/SWAG tasks.
1207
+ """,
1208
+ ROBERTA_START_DOCSTRING,
1209
+ )
1210
+ class FlaxRobertaForMultipleChoice(FlaxRobertaPreTrainedModel):
1211
+ module_class = FlaxRobertaForMultipleChoiceModule
1212
+
1213
+
1214
+ overwrite_call_docstring(
1215
+ FlaxRobertaForMultipleChoice, ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1216
+ )
1217
+ append_call_sample_docstring(
1218
+ FlaxRobertaForMultipleChoice,
1219
+ _CHECKPOINT_FOR_DOC,
1220
+ FlaxMultipleChoiceModelOutput,
1221
+ _CONFIG_FOR_DOC,
1222
+ )
1223
+
1224
+
1225
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForTokenClassificationModule with Bert->Roberta, with self.bert->self.roberta
1226
+ class FlaxRobertaForTokenClassificationModule(nn.Module):
1227
+ config: RobertaConfig
1228
+ dtype: jnp.dtype = jnp.float32
1229
+ gradient_checkpointing: bool = False
1230
+
1231
+ def setup(self):
1232
+ self.roberta = FlaxRobertaModule(
1233
+ config=self.config,
1234
+ dtype=self.dtype,
1235
+ add_pooling_layer=False,
1236
+ gradient_checkpointing=self.gradient_checkpointing,
1237
+ )
1238
+ classifier_dropout = (
1239
+ self.config.classifier_dropout
1240
+ if self.config.classifier_dropout is not None
1241
+ else self.config.hidden_dropout_prob
1242
+ )
1243
+ self.dropout = nn.Dropout(rate=classifier_dropout)
1244
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
1245
+
1246
+ def __call__(
1247
+ self,
1248
+ input_ids,
1249
+ attention_mask,
1250
+ token_type_ids,
1251
+ position_ids,
1252
+ head_mask,
1253
+ deterministic: bool = True,
1254
+ output_attentions: bool = False,
1255
+ output_hidden_states: bool = False,
1256
+ return_dict: bool = True,
1257
+ ):
1258
+ # Model
1259
+ outputs = self.roberta(
1260
+ input_ids,
1261
+ attention_mask,
1262
+ token_type_ids,
1263
+ position_ids,
1264
+ head_mask,
1265
+ deterministic=deterministic,
1266
+ output_attentions=output_attentions,
1267
+ output_hidden_states=output_hidden_states,
1268
+ return_dict=return_dict,
1269
+ )
1270
+
1271
+ hidden_states = outputs[0]
1272
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1273
+ logits = self.classifier(hidden_states)
1274
+
1275
+ if not return_dict:
1276
+ return (logits,) + outputs[1:]
1277
+
1278
+ return FlaxTokenClassifierOutput(
1279
+ logits=logits,
1280
+ hidden_states=outputs.hidden_states,
1281
+ attentions=outputs.attentions,
1282
+ )
1283
+
1284
+
1285
+ @add_start_docstrings(
1286
+ """
1287
+ Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1288
+ Named-Entity-Recognition (NER) tasks.
1289
+ """,
1290
+ ROBERTA_START_DOCSTRING,
1291
+ )
1292
+ class FlaxRobertaForTokenClassification(FlaxRobertaPreTrainedModel):
1293
+ module_class = FlaxRobertaForTokenClassificationModule
1294
+
1295
+
1296
+ append_call_sample_docstring(
1297
+ FlaxRobertaForTokenClassification,
1298
+ _CHECKPOINT_FOR_DOC,
1299
+ FlaxTokenClassifierOutput,
1300
+ _CONFIG_FOR_DOC,
1301
+ )
1302
+
1303
+
1304
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForQuestionAnsweringModule with Bert->Roberta, with self.bert->self.roberta
1305
+ class FlaxRobertaForQuestionAnsweringModule(nn.Module):
1306
+ config: RobertaConfig
1307
+ dtype: jnp.dtype = jnp.float32
1308
+ gradient_checkpointing: bool = False
1309
+
1310
+ def setup(self):
1311
+ self.roberta = FlaxRobertaModule(
1312
+ config=self.config,
1313
+ dtype=self.dtype,
1314
+ add_pooling_layer=False,
1315
+ gradient_checkpointing=self.gradient_checkpointing,
1316
+ )
1317
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
1318
+
1319
+ def __call__(
1320
+ self,
1321
+ input_ids,
1322
+ attention_mask,
1323
+ token_type_ids,
1324
+ position_ids,
1325
+ head_mask,
1326
+ deterministic: bool = True,
1327
+ output_attentions: bool = False,
1328
+ output_hidden_states: bool = False,
1329
+ return_dict: bool = True,
1330
+ ):
1331
+ # Model
1332
+ outputs = self.roberta(
1333
+ input_ids,
1334
+ attention_mask,
1335
+ token_type_ids,
1336
+ position_ids,
1337
+ head_mask,
1338
+ deterministic=deterministic,
1339
+ output_attentions=output_attentions,
1340
+ output_hidden_states=output_hidden_states,
1341
+ return_dict=return_dict,
1342
+ )
1343
+
1344
+ hidden_states = outputs[0]
1345
+
1346
+ logits = self.qa_outputs(hidden_states)
1347
+ start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1)
1348
+ start_logits = start_logits.squeeze(-1)
1349
+ end_logits = end_logits.squeeze(-1)
1350
+
1351
+ if not return_dict:
1352
+ return (start_logits, end_logits) + outputs[1:]
1353
+
1354
+ return FlaxQuestionAnsweringModelOutput(
1355
+ start_logits=start_logits,
1356
+ end_logits=end_logits,
1357
+ hidden_states=outputs.hidden_states,
1358
+ attentions=outputs.attentions,
1359
+ )
1360
+
1361
+
1362
+ @add_start_docstrings(
1363
+ """
1364
+ Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1365
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1366
+ """,
1367
+ ROBERTA_START_DOCSTRING,
1368
+ )
1369
+ class FlaxRobertaForQuestionAnswering(FlaxRobertaPreTrainedModel):
1370
+ module_class = FlaxRobertaForQuestionAnsweringModule
1371
+
1372
+
1373
+ append_call_sample_docstring(
1374
+ FlaxRobertaForQuestionAnswering,
1375
+ _CHECKPOINT_FOR_DOC,
1376
+ FlaxQuestionAnsweringModelOutput,
1377
+ _CONFIG_FOR_DOC,
1378
+ )
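
For question answering, the single `qa_outputs` projection above emits `num_labels` scores per token; with the usual `num_labels=2` the `jnp.split` call separates them into start and end logits. A standalone shape sketch of that step, assuming `num_labels=2` and dummy values:

    import jax.numpy as jnp

    batch_size, seq_len, num_labels = 2, 7, 2
    logits = jnp.zeros((batch_size, seq_len, num_labels))

    start_logits, end_logits = jnp.split(logits, num_labels, axis=-1)
    start_logits = start_logits.squeeze(-1)  # (2, 7): one start score per token
    end_logits = end_logits.squeeze(-1)      # (2, 7): one end score per token
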
1379
+
1380
+
1381
+ class FlaxRobertaForCausalLMModule(nn.Module):
1382
+ config: RobertaConfig
1383
+ dtype: jnp.dtype = jnp.float32
1384
+ gradient_checkpointing: bool = False
1385
+
1386
+ def setup(self):
1387
+ self.roberta = FlaxRobertaModule(
1388
+ config=self.config,
1389
+ add_pooling_layer=False,
1390
+ dtype=self.dtype,
1391
+ gradient_checkpointing=self.gradient_checkpointing,
1392
+ )
1393
+ self.lm_head = FlaxRobertaLMHead(config=self.config, dtype=self.dtype)
1394
+
1395
+ def __call__(
1396
+ self,
1397
+ input_ids,
1398
+ attention_mask,
1399
+ position_ids,
1400
+ token_type_ids: Optional[jnp.ndarray] = None,
1401
+ head_mask: Optional[jnp.ndarray] = None,
1402
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
1403
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1404
+ init_cache: bool = False,
1405
+ deterministic: bool = True,
1406
+ output_attentions: bool = False,
1407
+ output_hidden_states: bool = False,
1408
+ return_dict: bool = True,
1409
+ ):
1410
+ # Model
1411
+ outputs = self.roberta(
1412
+ input_ids,
1413
+ attention_mask,
1414
+ token_type_ids,
1415
+ position_ids,
1416
+ head_mask,
1417
+ encoder_hidden_states=encoder_hidden_states,
1418
+ encoder_attention_mask=encoder_attention_mask,
1419
+ init_cache=init_cache,
1420
+ deterministic=deterministic,
1421
+ output_attentions=output_attentions,
1422
+ output_hidden_states=output_hidden_states,
1423
+ return_dict=return_dict,
1424
+ )
1425
+
1426
+ hidden_states = outputs[0]
1427
+ if self.config.tie_word_embeddings:
1428
+ shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1429
+ else:
1430
+ shared_embedding = None
1431
+
1432
+ # Compute the prediction scores
1433
+ logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)
1434
+
1435
+ if not return_dict:
1436
+ return (logits,) + outputs[1:]
1437
+
1438
+ return FlaxCausalLMOutputWithCrossAttentions(
1439
+ logits=logits,
1440
+ hidden_states=outputs.hidden_states,
1441
+ attentions=outputs.attentions,
1442
+ cross_attentions=outputs.cross_attentions,
1443
+ )
1444
+
1445
+
1446
+ @add_start_docstrings(
1447
+ """
1448
+ Roberta Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g. for
1449
+ autoregressive tasks.
1450
+ """,
1451
+ ROBERTA_START_DOCSTRING,
1452
+ )
1453
+ class FlaxRobertaForCausalLM(FlaxRobertaPreTrainedModel):
1454
+ module_class = FlaxRobertaForCausalLMModule
1455
+
1456
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
1457
+ # initializing the cache
1458
+ batch_size, seq_length = input_ids.shape
1459
+
1460
+ past_key_values = self.init_cache(batch_size, max_length)
1461
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1462
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1463
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
1464
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1465
+ if attention_mask is not None:
1466
+ position_ids = attention_mask.cumsum(axis=-1) - 1
1467
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
1468
+ else:
1469
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1470
+
1471
+ return {
1472
+ "past_key_values": past_key_values,
1473
+ "attention_mask": extended_attention_mask,
1474
+ "position_ids": position_ids,
1475
+ }
1476
+
1477
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1478
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1479
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
1480
+ return model_kwargs
1481
+
1482
+
1483
+ append_call_sample_docstring(
1484
+ FlaxRobertaForCausalLM,
1485
+ _CHECKPOINT_FOR_DOC,
1486
+ FlaxCausalLMOutputWithCrossAttentions,
1487
+ _CONFIG_FOR_DOC,
1488
+ )
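
`prepare_inputs_for_generation` above builds one static attention mask of length `max_length` and derives position ids from the cumulative sum of the real mask, relying on the causal mask to neutralize the unused tail. A small numeric sketch of those two lines, with a made-up 4-token prompt whose last position is padding:

    import jax.numpy as jnp
    from jax import lax

    attention_mask = jnp.array([[1, 1, 1, 0]], dtype="i4")  # batch of 1, last token padded
    max_length = 6

    position_ids = attention_mask.cumsum(axis=-1) - 1       # [[0, 1, 2, 2]]
    extended_attention_mask = jnp.ones((1, max_length), dtype="i4")
    extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
    # extended_attention_mask is now [[1, 1, 1, 0, 1, 1]]; the slots past the prompt stay 1
    # because the decoder's causal mask already prevents attending to future positions.
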
1489
+
1490
+
1491
+ __all__ = [
1492
+ "FlaxRobertaForCausalLM",
1493
+ "FlaxRobertaForMaskedLM",
1494
+ "FlaxRobertaForMultipleChoice",
1495
+ "FlaxRobertaForQuestionAnswering",
1496
+ "FlaxRobertaForSequenceClassification",
1497
+ "FlaxRobertaForTokenClassification",
1498
+ "FlaxRobertaModel",
1499
+ "FlaxRobertaPreTrainedModel",
1500
+ ]
docs/transformers/build/lib/transformers/models/roberta/modeling_roberta.py ADDED
@@ -0,0 +1,1698 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch RoBERTa model."""
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from packaging import version
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN, gelu
28
+ from ...generation import GenerationMixin
29
+ from ...modeling_attn_mask_utils import (
30
+ _prepare_4d_attention_mask_for_sdpa,
31
+ _prepare_4d_causal_attention_mask_for_sdpa,
32
+ )
33
+ from ...modeling_outputs import (
34
+ BaseModelOutputWithPastAndCrossAttentions,
35
+ BaseModelOutputWithPoolingAndCrossAttentions,
36
+ CausalLMOutputWithCrossAttentions,
37
+ MaskedLMOutput,
38
+ MultipleChoiceModelOutput,
39
+ QuestionAnsweringModelOutput,
40
+ SequenceClassifierOutput,
41
+ TokenClassifierOutput,
42
+ )
43
+ from ...modeling_utils import PreTrainedModel
44
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
45
+ from ...utils import (
46
+ add_code_sample_docstrings,
47
+ add_start_docstrings,
48
+ add_start_docstrings_to_model_forward,
49
+ get_torch_version,
50
+ logging,
51
+ replace_return_docstrings,
52
+ )
53
+ from .configuration_roberta import RobertaConfig
54
+
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+ _CHECKPOINT_FOR_DOC = "FacebookAI/roberta-base"
59
+ _CONFIG_FOR_DOC = "RobertaConfig"
60
+
61
+
62
+ class RobertaEmbeddings(nn.Module):
63
+ """
64
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
65
+ """
66
+
67
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
68
+ def __init__(self, config):
69
+ super().__init__()
70
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
71
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
72
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
73
+
74
+ # self.LayerNorm is not snake-cased to stick with the TensorFlow model variable name and be able to load
75
+ # any TensorFlow checkpoint file
76
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
77
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
78
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
79
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
80
+ self.register_buffer(
81
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
82
+ )
83
+ self.register_buffer(
84
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
85
+ )
86
+
87
+ # End copy
88
+ self.padding_idx = config.pad_token_id
89
+ self.position_embeddings = nn.Embedding(
90
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
91
+ )
92
+
93
+ def forward(
94
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
95
+ ):
96
+ if position_ids is None:
97
+ if input_ids is not None:
98
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
99
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
100
+ else:
101
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
102
+
103
+ if input_ids is not None:
104
+ input_shape = input_ids.size()
105
+ else:
106
+ input_shape = inputs_embeds.size()[:-1]
107
+
108
+ seq_length = input_shape[1]
109
+
110
+ # Set the token_type_ids to the registered buffer in the constructor, where it is all zeros; this usually happens
111
+ # when they are auto-generated. The registered buffer helps users trace the model without passing token_type_ids and solves
112
+ # issue #5664
113
+ if token_type_ids is None:
114
+ if hasattr(self, "token_type_ids"):
115
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
116
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
117
+ token_type_ids = buffered_token_type_ids_expanded
118
+ else:
119
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
120
+
121
+ if inputs_embeds is None:
122
+ inputs_embeds = self.word_embeddings(input_ids)
123
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
124
+
125
+ embeddings = inputs_embeds + token_type_embeddings
126
+ if self.position_embedding_type == "absolute":
127
+ position_embeddings = self.position_embeddings(position_ids)
128
+ embeddings += position_embeddings
129
+ embeddings = self.LayerNorm(embeddings)
130
+ embeddings = self.dropout(embeddings)
131
+ return embeddings
132
+
133
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
134
+ """
135
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
136
+
137
+ Args:
138
+ inputs_embeds: torch.Tensor
139
+
140
+ Returns: torch.Tensor
141
+ """
142
+ input_shape = inputs_embeds.size()[:-1]
143
+ sequence_length = input_shape[1]
144
+
145
+ position_ids = torch.arange(
146
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
147
+ )
148
+ return position_ids.unsqueeze(0).expand(input_shape)
149
+
150
+
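
Unlike BERT, RoBERTa reserves position `padding_idx` (1 for the standard checkpoints) and starts real positions at `padding_idx + 1`, which is why `create_position_ids_from_inputs_embeds` above counts from that offset. A tiny sketch of the resulting ids, assuming `padding_idx = 1`:

    import torch

    padding_idx = 1
    sequence_length = 4
    # Positions generated for an embeddings-only input: padding_idx + 1 .. padding_idx + seq_len
    position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long)
    # tensor([2, 3, 4, 5]) -- indices 0 and 1 stay reserved for special/padding positions
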
151
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
152
+ class RobertaSelfAttention(nn.Module):
153
+ def __init__(self, config, position_embedding_type=None):
154
+ super().__init__()
155
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
156
+ raise ValueError(
157
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
158
+ f"heads ({config.num_attention_heads})"
159
+ )
160
+
161
+ self.num_attention_heads = config.num_attention_heads
162
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
163
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
164
+
165
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
166
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
167
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
168
+
169
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
170
+ self.position_embedding_type = position_embedding_type or getattr(
171
+ config, "position_embedding_type", "absolute"
172
+ )
173
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
174
+ self.max_position_embeddings = config.max_position_embeddings
175
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
176
+
177
+ self.is_decoder = config.is_decoder
178
+
179
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
180
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
181
+ x = x.view(new_x_shape)
182
+ return x.permute(0, 2, 1, 3)
183
+
184
+ def forward(
185
+ self,
186
+ hidden_states: torch.Tensor,
187
+ attention_mask: Optional[torch.FloatTensor] = None,
188
+ head_mask: Optional[torch.FloatTensor] = None,
189
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
190
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
191
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
192
+ output_attentions: Optional[bool] = False,
193
+ ) -> Tuple[torch.Tensor]:
194
+ mixed_query_layer = self.query(hidden_states)
195
+
196
+ # If this is instantiated as a cross-attention module, the keys
197
+ # and values come from an encoder; the attention mask needs to be
198
+ # such that the encoder's padding tokens are not attended to.
199
+ is_cross_attention = encoder_hidden_states is not None
200
+
201
+ if is_cross_attention and past_key_value is not None:
202
+ # reuse k,v, cross_attentions
203
+ key_layer = past_key_value[0]
204
+ value_layer = past_key_value[1]
205
+ attention_mask = encoder_attention_mask
206
+ elif is_cross_attention:
207
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
208
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
209
+ attention_mask = encoder_attention_mask
210
+ elif past_key_value is not None:
211
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
212
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
213
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
214
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
215
+ else:
216
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
217
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
218
+
219
+ query_layer = self.transpose_for_scores(mixed_query_layer)
220
+
221
+ use_cache = past_key_value is not None
222
+ if self.is_decoder:
223
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
224
+ # Further calls to cross_attention layer can then reuse all cross-attention
225
+ # key/value_states (first "if" case)
226
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
227
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
228
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
229
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
230
+ past_key_value = (key_layer, value_layer)
231
+
232
+ # Take the dot product between "query" and "key" to get the raw attention scores.
233
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
234
+
235
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
236
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
237
+ if use_cache:
238
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
239
+ -1, 1
240
+ )
241
+ else:
242
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
243
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
244
+ distance = position_ids_l - position_ids_r
245
+
246
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
247
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
248
+
249
+ if self.position_embedding_type == "relative_key":
250
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
251
+ attention_scores = attention_scores + relative_position_scores
252
+ elif self.position_embedding_type == "relative_key_query":
253
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
254
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
255
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
256
+
257
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
258
+ if attention_mask is not None:
259
+ # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
260
+ attention_scores = attention_scores + attention_mask
261
+
262
+ # Normalize the attention scores to probabilities.
263
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
264
+
265
+ # This is actually dropping out entire tokens to attend to, which might
266
+ # seem a bit unusual, but is taken from the original Transformer paper.
267
+ attention_probs = self.dropout(attention_probs)
268
+
269
+ # Mask heads if we want to
270
+ if head_mask is not None:
271
+ attention_probs = attention_probs * head_mask
272
+
273
+ context_layer = torch.matmul(attention_probs, value_layer)
274
+
275
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
276
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
277
+ context_layer = context_layer.view(new_context_layer_shape)
278
+
279
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
280
+
281
+ if self.is_decoder:
282
+ outputs = outputs + (past_key_value,)
283
+ return outputs
284
+
285
+
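
When `position_embedding_type` is `relative_key` or `relative_key_query`, the attention above looks up one embedding per query/key distance; the distance matrix is shifted by `max_position_embeddings - 1` so it can index an ordinary `nn.Embedding` of size `2 * max_position_embeddings - 1`. A toy-sized sketch of that indexing (the real RoBERTa checkpoints use `max_position_embeddings = 514`):

    import torch

    query_length, key_length = 3, 3
    max_position_embeddings = 5  # toy value for readability

    position_ids_l = torch.arange(query_length, dtype=torch.long).view(-1, 1)
    position_ids_r = torch.arange(key_length, dtype=torch.long).view(1, -1)
    distance = position_ids_l - position_ids_r        # values in [-(key_length - 1), query_length - 1]
    bucket = distance + max_position_embeddings - 1   # shifted to non-negative embedding indices
    # distance:              bucket:
    # [[ 0, -1, -2],         [[4, 3, 2],
    #  [ 1,  0, -1],          [5, 4, 3],
    #  [ 2,  1,  0]]          [6, 5, 4]]
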
286
+ # Copied from transformers.models.bert.modeling_bert.BertSdpaSelfAttention with Bert->Roberta
287
+ class RobertaSdpaSelfAttention(RobertaSelfAttention):
288
+ def __init__(self, config, position_embedding_type=None):
289
+ super().__init__(config, position_embedding_type=position_embedding_type)
290
+ self.dropout_prob = config.attention_probs_dropout_prob
291
+ self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse("2.2.0")
292
+
293
+ # Adapted from RobertaSelfAttention
294
+ def forward(
295
+ self,
296
+ hidden_states: torch.Tensor,
297
+ attention_mask: Optional[torch.Tensor] = None,
298
+ head_mask: Optional[torch.FloatTensor] = None,
299
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
300
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
301
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
302
+ output_attentions: Optional[bool] = False,
303
+ ) -> Tuple[torch.Tensor]:
304
+ if self.position_embedding_type != "absolute" or output_attentions or head_mask is not None:
305
+ # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once implemented.
306
+ logger.warning_once(
307
+ "RobertaSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support "
308
+ "non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to "
309
+ "the manual attention implementation, but specifying the manual implementation will be required from "
310
+ "Transformers version v5.0.0 onwards. This warning can be removed using the argument "
311
+ '`attn_implementation="eager"` when loading the model.'
312
+ )
313
+ return super().forward(
314
+ hidden_states,
315
+ attention_mask,
316
+ head_mask,
317
+ encoder_hidden_states,
318
+ encoder_attention_mask,
319
+ past_key_value,
320
+ output_attentions,
321
+ )
322
+
323
+ bsz, tgt_len, _ = hidden_states.size()
324
+
325
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
326
+
327
+ # If this is instantiated as a cross-attention module, the keys and values come from an encoder; the attention
328
+ # mask needs to be such that the encoder's padding tokens are not attended to.
329
+ is_cross_attention = encoder_hidden_states is not None
330
+
331
+ current_states = encoder_hidden_states if is_cross_attention else hidden_states
332
+ attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
333
+
334
+ # Check `seq_length` of `past_key_value` == `len(current_states)` to support prefix tuning
335
+ if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]:
336
+ key_layer, value_layer = past_key_value
337
+ else:
338
+ key_layer = self.transpose_for_scores(self.key(current_states))
339
+ value_layer = self.transpose_for_scores(self.value(current_states))
340
+ if past_key_value is not None and not is_cross_attention:
341
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
342
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
343
+
344
+ if self.is_decoder:
345
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
346
+ # Further calls to cross_attention layer can then reuse all cross-attention
347
+ # key/value_states (first "if" case)
348
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
349
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
350
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
351
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
352
+ past_key_value = (key_layer, value_layer)
353
+
354
+ # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
355
+ # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
356
+ # Reference: https://github.com/pytorch/pytorch/issues/112577
357
+ if self.require_contiguous_qkv and query_layer.device.type == "cuda" and attention_mask is not None:
358
+ query_layer = query_layer.contiguous()
359
+ key_layer = key_layer.contiguous()
360
+ value_layer = value_layer.contiguous()
361
+
362
+ # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
363
+ # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
364
+ # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create
365
+ # a causal mask in case tgt_len == 1.
366
+ is_causal = (
367
+ True if self.is_decoder and not is_cross_attention and attention_mask is None and tgt_len > 1 else False
368
+ )
369
+
370
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
371
+ query_layer,
372
+ key_layer,
373
+ value_layer,
374
+ attn_mask=attention_mask,
375
+ dropout_p=self.dropout_prob if self.training else 0.0,
376
+ is_causal=is_causal,
377
+ )
378
+
379
+ attn_output = attn_output.transpose(1, 2)
380
+ attn_output = attn_output.reshape(bsz, tgt_len, self.all_head_size)
381
+
382
+ outputs = (attn_output,)
383
+ if self.is_decoder:
384
+ outputs = outputs + (past_key_value,)
385
+ return outputs
386
+
387
+
388
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
389
+ class RobertaSelfOutput(nn.Module):
390
+ def __init__(self, config):
391
+ super().__init__()
392
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
393
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
394
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
395
+
396
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
397
+ hidden_states = self.dense(hidden_states)
398
+ hidden_states = self.dropout(hidden_states)
399
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
400
+ return hidden_states
401
+
402
+
403
+ ROBERTA_SELF_ATTENTION_CLASSES = {
404
+ "eager": RobertaSelfAttention,
405
+ "sdpa": RobertaSdpaSelfAttention,
406
+ }
407
+
408
+
409
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta,BERT->ROBERTA
410
+ class RobertaAttention(nn.Module):
411
+ def __init__(self, config, position_embedding_type=None):
412
+ super().__init__()
413
+ self.self = ROBERTA_SELF_ATTENTION_CLASSES[config._attn_implementation](
414
+ config, position_embedding_type=position_embedding_type
415
+ )
416
+ self.output = RobertaSelfOutput(config)
417
+ self.pruned_heads = set()
418
+
419
+ def prune_heads(self, heads):
420
+ if len(heads) == 0:
421
+ return
422
+ heads, index = find_pruneable_heads_and_indices(
423
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
424
+ )
425
+
426
+ # Prune linear layers
427
+ self.self.query = prune_linear_layer(self.self.query, index)
428
+ self.self.key = prune_linear_layer(self.self.key, index)
429
+ self.self.value = prune_linear_layer(self.self.value, index)
430
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
431
+
432
+ # Update hyper params and store pruned heads
433
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
434
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
435
+ self.pruned_heads = self.pruned_heads.union(heads)
436
+
437
+ def forward(
438
+ self,
439
+ hidden_states: torch.Tensor,
440
+ attention_mask: Optional[torch.FloatTensor] = None,
441
+ head_mask: Optional[torch.FloatTensor] = None,
442
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
443
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
444
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
445
+ output_attentions: Optional[bool] = False,
446
+ ) -> Tuple[torch.Tensor]:
447
+ self_outputs = self.self(
448
+ hidden_states,
449
+ attention_mask,
450
+ head_mask,
451
+ encoder_hidden_states,
452
+ encoder_attention_mask,
453
+ past_key_value,
454
+ output_attentions,
455
+ )
456
+ attention_output = self.output(self_outputs[0], hidden_states)
457
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
458
+ return outputs
459
+
460
+
461
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
462
+ class RobertaIntermediate(nn.Module):
463
+ def __init__(self, config):
464
+ super().__init__()
465
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
466
+ if isinstance(config.hidden_act, str):
467
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
468
+ else:
469
+ self.intermediate_act_fn = config.hidden_act
470
+
471
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
472
+ hidden_states = self.dense(hidden_states)
473
+ hidden_states = self.intermediate_act_fn(hidden_states)
474
+ return hidden_states
475
+
476
+
477
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
478
+ class RobertaOutput(nn.Module):
479
+ def __init__(self, config):
480
+ super().__init__()
481
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
482
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
483
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
484
+
485
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
486
+ hidden_states = self.dense(hidden_states)
487
+ hidden_states = self.dropout(hidden_states)
488
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
489
+ return hidden_states
490
+
491
+
492
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
493
+ class RobertaLayer(nn.Module):
494
+ def __init__(self, config):
495
+ super().__init__()
496
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
497
+ self.seq_len_dim = 1
498
+ self.attention = RobertaAttention(config)
499
+ self.is_decoder = config.is_decoder
500
+ self.add_cross_attention = config.add_cross_attention
501
+ if self.add_cross_attention:
502
+ if not self.is_decoder:
503
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
504
+ self.crossattention = RobertaAttention(config, position_embedding_type="absolute")
505
+ self.intermediate = RobertaIntermediate(config)
506
+ self.output = RobertaOutput(config)
507
+
508
+ def forward(
509
+ self,
510
+ hidden_states: torch.Tensor,
511
+ attention_mask: Optional[torch.FloatTensor] = None,
512
+ head_mask: Optional[torch.FloatTensor] = None,
513
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
514
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
515
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
516
+ output_attentions: Optional[bool] = False,
517
+ ) -> Tuple[torch.Tensor]:
518
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
519
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
520
+ self_attention_outputs = self.attention(
521
+ hidden_states,
522
+ attention_mask,
523
+ head_mask,
524
+ output_attentions=output_attentions,
525
+ past_key_value=self_attn_past_key_value,
526
+ )
527
+ attention_output = self_attention_outputs[0]
528
+
529
+ # if decoder, the last output is tuple of self-attn cache
530
+ if self.is_decoder:
531
+ outputs = self_attention_outputs[1:-1]
532
+ present_key_value = self_attention_outputs[-1]
533
+ else:
534
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
535
+
536
+ cross_attn_present_key_value = None
537
+ if self.is_decoder and encoder_hidden_states is not None:
538
+ if not hasattr(self, "crossattention"):
539
+ raise ValueError(
540
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
541
+ " by setting `config.add_cross_attention=True`"
542
+ )
543
+
544
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
545
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
546
+ cross_attention_outputs = self.crossattention(
547
+ attention_output,
548
+ attention_mask,
549
+ head_mask,
550
+ encoder_hidden_states,
551
+ encoder_attention_mask,
552
+ cross_attn_past_key_value,
553
+ output_attentions,
554
+ )
555
+ attention_output = cross_attention_outputs[0]
556
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
557
+
558
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
559
+ cross_attn_present_key_value = cross_attention_outputs[-1]
560
+ present_key_value = present_key_value + cross_attn_present_key_value
561
+
562
+ layer_output = apply_chunking_to_forward(
563
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
564
+ )
565
+ outputs = (layer_output,) + outputs
566
+
567
+ # if decoder, return the attn key/values as the last output
568
+ if self.is_decoder:
569
+ outputs = outputs + (present_key_value,)
570
+
571
+ return outputs
572
+
573
+ def feed_forward_chunk(self, attention_output):
574
+ intermediate_output = self.intermediate(attention_output)
575
+ layer_output = self.output(intermediate_output, attention_output)
576
+ return layer_output
577
+
578
+
579
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
580
+ class RobertaEncoder(nn.Module):
581
+ def __init__(self, config):
582
+ super().__init__()
583
+ self.config = config
584
+ self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
585
+ self.gradient_checkpointing = False
586
+
587
+ def forward(
588
+ self,
589
+ hidden_states: torch.Tensor,
590
+ attention_mask: Optional[torch.FloatTensor] = None,
591
+ head_mask: Optional[torch.FloatTensor] = None,
592
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
593
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
594
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
595
+ use_cache: Optional[bool] = None,
596
+ output_attentions: Optional[bool] = False,
597
+ output_hidden_states: Optional[bool] = False,
598
+ return_dict: Optional[bool] = True,
599
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
600
+ all_hidden_states = () if output_hidden_states else None
601
+ all_self_attentions = () if output_attentions else None
602
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
603
+
604
+ if self.gradient_checkpointing and self.training:
605
+ if use_cache:
606
+ logger.warning_once(
607
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
608
+ )
609
+ use_cache = False
610
+
611
+ next_decoder_cache = () if use_cache else None
612
+ for i, layer_module in enumerate(self.layer):
613
+ if output_hidden_states:
614
+ all_hidden_states = all_hidden_states + (hidden_states,)
615
+
616
+ layer_head_mask = head_mask[i] if head_mask is not None else None
617
+ past_key_value = past_key_values[i] if past_key_values is not None else None
618
+
619
+ if self.gradient_checkpointing and self.training:
620
+ layer_outputs = self._gradient_checkpointing_func(
621
+ layer_module.__call__,
622
+ hidden_states,
623
+ attention_mask,
624
+ layer_head_mask,
625
+ encoder_hidden_states,
626
+ encoder_attention_mask,
627
+ past_key_value,
628
+ output_attentions,
629
+ )
630
+ else:
631
+ layer_outputs = layer_module(
632
+ hidden_states,
633
+ attention_mask,
634
+ layer_head_mask,
635
+ encoder_hidden_states,
636
+ encoder_attention_mask,
637
+ past_key_value,
638
+ output_attentions,
639
+ )
640
+
641
+ hidden_states = layer_outputs[0]
642
+ if use_cache:
643
+ next_decoder_cache += (layer_outputs[-1],)
644
+ if output_attentions:
645
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
646
+ if self.config.add_cross_attention:
647
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
648
+
649
+ if output_hidden_states:
650
+ all_hidden_states = all_hidden_states + (hidden_states,)
651
+
652
+ if not return_dict:
653
+ return tuple(
654
+ v
655
+ for v in [
656
+ hidden_states,
657
+ next_decoder_cache,
658
+ all_hidden_states,
659
+ all_self_attentions,
660
+ all_cross_attentions,
661
+ ]
662
+ if v is not None
663
+ )
664
+ return BaseModelOutputWithPastAndCrossAttentions(
665
+ last_hidden_state=hidden_states,
666
+ past_key_values=next_decoder_cache,
667
+ hidden_states=all_hidden_states,
668
+ attentions=all_self_attentions,
669
+ cross_attentions=all_cross_attentions,
670
+ )
671
+
672
+
673
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
674
+ class RobertaPooler(nn.Module):
675
+ def __init__(self, config):
676
+ super().__init__()
677
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
678
+ self.activation = nn.Tanh()
679
+
680
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
681
+ # We "pool" the model by simply taking the hidden state corresponding
682
+ # to the first token.
683
+ first_token_tensor = hidden_states[:, 0]
684
+ pooled_output = self.dense(first_token_tensor)
685
+ pooled_output = self.activation(pooled_output)
686
+ return pooled_output
687
+
688
+
689
+ class RobertaPreTrainedModel(PreTrainedModel):
690
+ """
691
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
692
+ models.
693
+ """
694
+
695
+ config_class = RobertaConfig
696
+ base_model_prefix = "roberta"
697
+ supports_gradient_checkpointing = True
698
+ _no_split_modules = ["RobertaEmbeddings", "RobertaSelfAttention", "RobertaSdpaSelfAttention"]
699
+ _supports_sdpa = True
700
+
701
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights with BertLMPredictionHead->RobertaLMHead
702
+ def _init_weights(self, module):
703
+ """Initialize the weights"""
704
+ if isinstance(module, nn.Linear):
705
+ # Slightly different from the TF version which uses truncated_normal for initialization
706
+ # cf https://github.com/pytorch/pytorch/pull/5617
707
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
708
+ if module.bias is not None:
709
+ module.bias.data.zero_()
710
+ elif isinstance(module, nn.Embedding):
711
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
712
+ if module.padding_idx is not None:
713
+ module.weight.data[module.padding_idx].zero_()
714
+ elif isinstance(module, nn.LayerNorm):
715
+ module.bias.data.zero_()
716
+ module.weight.data.fill_(1.0)
717
+ elif isinstance(module, RobertaLMHead):
718
+ module.bias.data.zero_()
719
+
720
+
721
+ ROBERTA_START_DOCSTRING = r"""
722
+
723
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
724
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
725
+ etc.)
726
+
727
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
728
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
729
+ and behavior.
730
+
731
+ Parameters:
732
+ config ([`RobertaConfig`]): Model configuration class with all the parameters of the
733
+ model. Initializing with a config file does not load the weights associated with the model, only the
734
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
735
+ """
736
+
737
+ ROBERTA_INPUTS_DOCSTRING = r"""
738
+ Args:
739
+ input_ids (`torch.LongTensor` of shape `({0})`):
740
+ Indices of input sequence tokens in the vocabulary.
741
+
742
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
743
+ [`PreTrainedTokenizer.__call__`] for details.
744
+
745
+ [What are input IDs?](../glossary#input-ids)
746
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
747
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
748
+
749
+ - 1 for tokens that are **not masked**,
750
+ - 0 for tokens that are **masked**.
751
+
752
+ [What are attention masks?](../glossary#attention-mask)
753
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
754
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
755
+
756
+ - 0 corresponds to a *sentence A* token,
757
+ - 1 corresponds to a *sentence B* token.
758
+ This parameter can only be used when the model is initialized with a `type_vocab_size` parameter with value
759
+ >= 2. All values in this tensor should always be < type_vocab_size.
760
+
761
+ [What are token type IDs?](../glossary#token-type-ids)
762
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
763
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
764
+ config.max_position_embeddings - 1]`.
765
+
766
+ [What are position IDs?](../glossary#position-ids)
767
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
768
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
769
+
770
+ - 1 indicates the head is **not masked**,
771
+ - 0 indicates the head is **masked**.
772
+
773
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
774
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
775
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
776
+ model's internal embedding lookup matrix.
777
+ output_attentions (`bool`, *optional*):
778
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
779
+ tensors for more detail.
780
+ output_hidden_states (`bool`, *optional*):
781
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
782
+ more detail.
783
+ return_dict (`bool`, *optional*):
784
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
785
+ """
786
+
787
+
788
+ @add_start_docstrings(
789
+ "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
790
+ ROBERTA_START_DOCSTRING,
791
+ )
792
+ # Copied from transformers.models.bert.modeling_bert.BertModel with Bert->Roberta, BERT->ROBERTA
793
+ class RobertaModel(RobertaPreTrainedModel):
794
+ """
795
+
796
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
797
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
798
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
799
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
800
+
801
+ To behave as a decoder, the model needs to be initialized with the `is_decoder` argument of the configuration set
802
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
803
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
804
+ """
805
+
806
+ _no_split_modules = ["RobertaEmbeddings", "RobertaLayer"]
807
+
808
+ def __init__(self, config, add_pooling_layer=True):
809
+ super().__init__(config)
810
+ self.config = config
811
+
812
+ self.embeddings = RobertaEmbeddings(config)
813
+ self.encoder = RobertaEncoder(config)
814
+
815
+ self.pooler = RobertaPooler(config) if add_pooling_layer else None
816
+
817
+ self.attn_implementation = config._attn_implementation
818
+ self.position_embedding_type = config.position_embedding_type
819
+
820
+ # Initialize weights and apply final processing
821
+ self.post_init()
822
+
823
+ def get_input_embeddings(self):
824
+ return self.embeddings.word_embeddings
825
+
826
+ def set_input_embeddings(self, value):
827
+ self.embeddings.word_embeddings = value
828
+
829
+ def _prune_heads(self, heads_to_prune):
830
+ """
831
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
832
+ class PreTrainedModel
833
+ """
834
+ for layer, heads in heads_to_prune.items():
835
+ self.encoder.layer[layer].attention.prune_heads(heads)
836
+
837
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
838
+ @add_code_sample_docstrings(
839
+ checkpoint=_CHECKPOINT_FOR_DOC,
840
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
841
+ config_class=_CONFIG_FOR_DOC,
842
+ )
843
+ def forward(
844
+ self,
845
+ input_ids: Optional[torch.Tensor] = None,
846
+ attention_mask: Optional[torch.Tensor] = None,
847
+ token_type_ids: Optional[torch.Tensor] = None,
848
+ position_ids: Optional[torch.Tensor] = None,
849
+ head_mask: Optional[torch.Tensor] = None,
850
+ inputs_embeds: Optional[torch.Tensor] = None,
851
+ encoder_hidden_states: Optional[torch.Tensor] = None,
852
+ encoder_attention_mask: Optional[torch.Tensor] = None,
853
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
854
+ use_cache: Optional[bool] = None,
855
+ output_attentions: Optional[bool] = None,
856
+ output_hidden_states: Optional[bool] = None,
857
+ return_dict: Optional[bool] = None,
858
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
859
+ r"""
860
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
861
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
862
+ the model is configured as a decoder.
863
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, target_length)`, *optional*):
864
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
865
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
866
+
867
+ - 1 for tokens that are **not masked**,
868
+ - 0 for tokens that are **masked**.
869
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
870
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
871
+
872
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
873
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
874
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
875
+ use_cache (`bool`, *optional*):
876
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
877
+ `past_key_values`).
878
+ """
879
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
880
+ output_hidden_states = (
881
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
882
+ )
883
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
884
+
885
+ if self.config.is_decoder:
886
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
887
+ else:
888
+ use_cache = False
889
+
890
+ if input_ids is not None and inputs_embeds is not None:
891
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
892
+ elif input_ids is not None:
893
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
894
+ input_shape = input_ids.size()
895
+ elif inputs_embeds is not None:
896
+ input_shape = inputs_embeds.size()[:-1]
897
+ else:
898
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
899
+
900
+ batch_size, seq_length = input_shape
901
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
902
+
903
+ # past_key_values_length
904
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
905
+
906
+ if token_type_ids is None:
907
+ if hasattr(self.embeddings, "token_type_ids"):
908
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
909
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
910
+ token_type_ids = buffered_token_type_ids_expanded
911
+ else:
912
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
913
+
914
+ embedding_output = self.embeddings(
915
+ input_ids=input_ids,
916
+ position_ids=position_ids,
917
+ token_type_ids=token_type_ids,
918
+ inputs_embeds=inputs_embeds,
919
+ past_key_values_length=past_key_values_length,
920
+ )
921
+
922
+ if attention_mask is None:
923
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
924
+
925
+ use_sdpa_attention_masks = (
926
+ self.attn_implementation == "sdpa"
927
+ and self.position_embedding_type == "absolute"
928
+ and head_mask is None
929
+ and not output_attentions
930
+ )
931
+
932
+ # Expand the attention mask
933
+ if use_sdpa_attention_masks and attention_mask.dim() == 2:
934
+ # Expand the attention mask for SDPA.
935
+ # [bsz, seq_len] -> [bsz, 1, seq_len, seq_len]
936
+ if self.config.is_decoder:
937
+ extended_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
938
+ attention_mask,
939
+ input_shape,
940
+ embedding_output,
941
+ past_key_values_length,
942
+ )
943
+ else:
944
+ extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
945
+ attention_mask, embedding_output.dtype, tgt_len=seq_length
946
+ )
947
+ else:
948
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
949
+ # ourselves in which case we just need to make it broadcastable to all heads.
950
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
951
+
952
+ # If a 2D or 3D attention mask is provided for the cross-attention
953
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
954
+ if self.config.is_decoder and encoder_hidden_states is not None:
955
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
956
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
957
+ if encoder_attention_mask is None:
958
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
959
+
960
+ if use_sdpa_attention_masks and encoder_attention_mask.dim() == 2:
961
+ # Expand the attention mask for SDPA.
962
+ # [bsz, seq_len] -> [bsz, 1, seq_len, seq_len]
963
+ encoder_extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
964
+ encoder_attention_mask, embedding_output.dtype, tgt_len=seq_length
965
+ )
966
+ else:
967
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
968
+ else:
969
+ encoder_extended_attention_mask = None
970
+
971
+ # Prepare head mask if needed
972
+ # 1.0 in head_mask indicates we keep the head
973
+ # attention_probs has shape bsz x n_heads x N x N
974
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
975
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
976
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
977
+
978
+ encoder_outputs = self.encoder(
979
+ embedding_output,
980
+ attention_mask=extended_attention_mask,
981
+ head_mask=head_mask,
982
+ encoder_hidden_states=encoder_hidden_states,
983
+ encoder_attention_mask=encoder_extended_attention_mask,
984
+ past_key_values=past_key_values,
985
+ use_cache=use_cache,
986
+ output_attentions=output_attentions,
987
+ output_hidden_states=output_hidden_states,
988
+ return_dict=return_dict,
989
+ )
990
+ sequence_output = encoder_outputs[0]
991
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
992
+
993
+ if not return_dict:
994
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
995
+
996
+ return BaseModelOutputWithPoolingAndCrossAttentions(
997
+ last_hidden_state=sequence_output,
998
+ pooler_output=pooled_output,
999
+ past_key_values=encoder_outputs.past_key_values,
1000
+ hidden_states=encoder_outputs.hidden_states,
1001
+ attentions=encoder_outputs.attentions,
1002
+ cross_attentions=encoder_outputs.cross_attentions,
1003
+ )
1004
+
1005
+
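The eager (non-SDPA) branch above ultimately broadcasts the 2-D padding mask into an additive bias via `get_extended_attention_mask`. The following is only a rough, self-contained sketch of that expansion (not the library helper itself); the shapes and the `float32` dtype are illustrative assumptions.

```python
import torch

def expand_padding_mask(attention_mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # [batch, seq_len] of 1s (keep) and 0s (pad) -> [batch, 1, 1, seq_len] additive bias
    extended = attention_mask[:, None, None, :].to(dtype)
    # keep -> 0.0, pad -> a very large negative value so softmax assigns it ~0 probability
    return (1.0 - extended) * torch.finfo(dtype).min

mask = torch.tensor([[1, 1, 1, 0]])  # one sequence whose last token is padding
bias = expand_padding_mask(mask)
print(bias.shape)      # torch.Size([1, 1, 1, 4])
print(bias[0, 0, 0])   # tensor([0., 0., 0., -3.4028e+38])
```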
1006
+ @add_start_docstrings(
1007
+ """RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.""", ROBERTA_START_DOCSTRING
1008
+ )
1009
+ class RobertaForCausalLM(RobertaPreTrainedModel, GenerationMixin):
1010
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
1011
+
1012
+ def __init__(self, config):
1013
+ super().__init__(config)
1014
+
1015
+ if not config.is_decoder:
1016
+ logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
1017
+
1018
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1019
+ self.lm_head = RobertaLMHead(config)
1020
+
1021
+ # Initialize weights and apply final processing
1022
+ self.post_init()
1023
+
1024
+ def get_output_embeddings(self):
1025
+ return self.lm_head.decoder
1026
+
1027
+ def set_output_embeddings(self, new_embeddings):
1028
+ self.lm_head.decoder = new_embeddings
1029
+
1030
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1031
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1032
+ def forward(
1033
+ self,
1034
+ input_ids: Optional[torch.LongTensor] = None,
1035
+ attention_mask: Optional[torch.FloatTensor] = None,
1036
+ token_type_ids: Optional[torch.LongTensor] = None,
1037
+ position_ids: Optional[torch.LongTensor] = None,
1038
+ head_mask: Optional[torch.FloatTensor] = None,
1039
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1040
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1041
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1042
+ labels: Optional[torch.LongTensor] = None,
1043
+ past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
1044
+ use_cache: Optional[bool] = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ **kwargs,
1049
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
1050
+ r"""
1051
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1052
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1053
+ the model is configured as a decoder.
1054
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1055
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1056
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1057
+
1058
+ - 1 for tokens that are **not masked**,
1059
+ - 0 for tokens that are **masked**.
1060
+
1061
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1062
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1063
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1064
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1065
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1066
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1067
+
1068
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1069
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1070
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1071
+ use_cache (`bool`, *optional*):
1072
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1073
+ `past_key_values`).
1074
+
1075
+ Returns:
1076
+
1077
+ Example:
1078
+
1079
+ ```python
1080
+ >>> from transformers import AutoTokenizer, RobertaForCausalLM, AutoConfig
1081
+ >>> import torch
1082
+
1083
+ >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
1084
+ >>> config = AutoConfig.from_pretrained("FacebookAI/roberta-base")
1085
+ >>> config.is_decoder = True
1086
+ >>> model = RobertaForCausalLM.from_pretrained("FacebookAI/roberta-base", config=config)
1087
+
1088
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1089
+ >>> outputs = model(**inputs)
1090
+
1091
+ >>> prediction_logits = outputs.logits
1092
+ ```"""
1093
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1094
+ if labels is not None:
1095
+ use_cache = False
1096
+
1097
+ outputs = self.roberta(
1098
+ input_ids,
1099
+ attention_mask=attention_mask,
1100
+ token_type_ids=token_type_ids,
1101
+ position_ids=position_ids,
1102
+ head_mask=head_mask,
1103
+ inputs_embeds=inputs_embeds,
1104
+ encoder_hidden_states=encoder_hidden_states,
1105
+ encoder_attention_mask=encoder_attention_mask,
1106
+ past_key_values=past_key_values,
1107
+ use_cache=use_cache,
1108
+ output_attentions=output_attentions,
1109
+ output_hidden_states=output_hidden_states,
1110
+ return_dict=return_dict,
1111
+ )
1112
+
1113
+ sequence_output = outputs[0]
1114
+ prediction_scores = self.lm_head(sequence_output)
1115
+
1116
+ lm_loss = None
1117
+ if labels is not None:
1118
+ # move labels to correct device to enable model parallelism
1119
+ labels = labels.to(prediction_scores.device)
1120
+ lm_loss = self.loss_function(
1121
+ prediction_scores,
1122
+ labels,
1123
+ vocab_size=self.config.vocab_size,
1124
+ **kwargs,
1125
+ )
1126
+
1127
+ if not return_dict:
1128
+ output = (prediction_scores,) + outputs[2:]
1129
+ return ((lm_loss,) + output) if lm_loss is not None else output
1130
+
1131
+ return CausalLMOutputWithCrossAttentions(
1132
+ loss=lm_loss,
1133
+ logits=prediction_scores,
1134
+ past_key_values=outputs.past_key_values,
1135
+ hidden_states=outputs.hidden_states,
1136
+ attentions=outputs.attentions,
1137
+ cross_attentions=outputs.cross_attentions,
1138
+ )
1139
+
1140
+ def _reorder_cache(self, past_key_values, beam_idx):
1141
+ reordered_past = ()
1142
+ for layer_past in past_key_values:
1143
+ reordered_past += (
1144
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1145
+ )
1146
+ return reordered_past
1147
+
1148
+
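`_reorder_cache` only re-indexes every cached key/value tensor along the batch dimension so the cache keeps following the beams selected at each generation step. A hedged illustration with dummy tensors (the real cache shapes come from the model config):

```python
import torch

# dummy past: 2 layers, each a (key, value) pair of shape [batch, heads, seq, head_dim]
past = tuple((torch.randn(3, 2, 4, 8), torch.randn(3, 2, 4, 8)) for _ in range(2))
beam_idx = torch.tensor([2, 0, 0])  # beam 0 continues hypothesis 2; beams 1 and 2 continue hypothesis 0

reordered = tuple(
    tuple(state.index_select(0, beam_idx) for state in layer_past) for layer_past in past
)
# new beam 1 now carries the keys that belonged to old hypothesis 0
assert torch.equal(reordered[0][0][1], past[0][0][0])
```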
1149
+ @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING)
1150
+ class RobertaForMaskedLM(RobertaPreTrainedModel):
1151
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
1152
+
1153
+ def __init__(self, config):
1154
+ super().__init__(config)
1155
+
1156
+ if config.is_decoder:
1157
+ logger.warning(
1158
+ "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
1159
+ "bi-directional self-attention."
1160
+ )
1161
+
1162
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1163
+ self.lm_head = RobertaLMHead(config)
1164
+
1165
+ # Initialize weights and apply final processing
1166
+ self.post_init()
1167
+
1168
+ def get_output_embeddings(self):
1169
+ return self.lm_head.decoder
1170
+
1171
+ def set_output_embeddings(self, new_embeddings):
1172
+ self.lm_head.decoder = new_embeddings
1173
+
1174
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1175
+ @add_code_sample_docstrings(
1176
+ checkpoint=_CHECKPOINT_FOR_DOC,
1177
+ output_type=MaskedLMOutput,
1178
+ config_class=_CONFIG_FOR_DOC,
1179
+ mask="<mask>",
1180
+ expected_output="' Paris'",
1181
+ expected_loss=0.1,
1182
+ )
1183
+ def forward(
1184
+ self,
1185
+ input_ids: Optional[torch.LongTensor] = None,
1186
+ attention_mask: Optional[torch.FloatTensor] = None,
1187
+ token_type_ids: Optional[torch.LongTensor] = None,
1188
+ position_ids: Optional[torch.LongTensor] = None,
1189
+ head_mask: Optional[torch.FloatTensor] = None,
1190
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1191
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1192
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1193
+ labels: Optional[torch.LongTensor] = None,
1194
+ output_attentions: Optional[bool] = None,
1195
+ output_hidden_states: Optional[bool] = None,
1196
+ return_dict: Optional[bool] = None,
1197
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1198
+ r"""
1199
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1200
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1201
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1202
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1203
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
1204
+ Used to hide legacy arguments that have been deprecated.
1205
+ """
1206
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1207
+
1208
+ outputs = self.roberta(
1209
+ input_ids,
1210
+ attention_mask=attention_mask,
1211
+ token_type_ids=token_type_ids,
1212
+ position_ids=position_ids,
1213
+ head_mask=head_mask,
1214
+ inputs_embeds=inputs_embeds,
1215
+ encoder_hidden_states=encoder_hidden_states,
1216
+ encoder_attention_mask=encoder_attention_mask,
1217
+ output_attentions=output_attentions,
1218
+ output_hidden_states=output_hidden_states,
1219
+ return_dict=return_dict,
1220
+ )
1221
+ sequence_output = outputs[0]
1222
+ prediction_scores = self.lm_head(sequence_output)
1223
+
1224
+ masked_lm_loss = None
1225
+ if labels is not None:
1226
+ # move labels to correct device to enable model parallelism
1227
+ labels = labels.to(prediction_scores.device)
1228
+ loss_fct = CrossEntropyLoss()
1229
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1230
+
1231
+ if not return_dict:
1232
+ output = (prediction_scores,) + outputs[2:]
1233
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1234
+
1235
+ return MaskedLMOutput(
1236
+ loss=masked_lm_loss,
1237
+ logits=prediction_scores,
1238
+ hidden_states=outputs.hidden_states,
1239
+ attentions=outputs.attentions,
1240
+ )
1241
+
1242
+
1243
+ class RobertaLMHead(nn.Module):
1244
+ """Roberta Head for masked language modeling."""
1245
+
1246
+ def __init__(self, config):
1247
+ super().__init__()
1248
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1249
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1250
+
1251
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
1252
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1253
+ self.decoder.bias = self.bias
1254
+
1255
+ def forward(self, features, **kwargs):
1256
+ x = self.dense(features)
1257
+ x = gelu(x)
1258
+ x = self.layer_norm(x)
1259
+
1260
+ # project back to size of vocabulary with bias
1261
+ x = self.decoder(x)
1262
+
1263
+ return x
1264
+
1265
+ def _tie_weights(self):
1266
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
1267
+ # For accelerate compatibility and to not break backward compatibility
1268
+ if self.decoder.bias.device.type == "meta":
1269
+ self.decoder.bias = self.bias
1270
+ else:
1271
+ self.bias = self.decoder.bias
1272
+
1273
+
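With the LM head wired to the shared decoder weights, a minimal fill-mask usage sketch of `RobertaForMaskedLM` (checkpoint name taken from the doc sample above; the printed completion depends on the downloaded weights):

```python
import torch
from transformers import AutoTokenizer, RobertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
model = RobertaForMaskedLM.from_pretrained("FacebookAI/roberta-base")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# locate the <mask> position and take the highest-scoring vocabulary entry there
mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))  # typically " Paris"
```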
1274
+ @add_start_docstrings(
1275
+ """
1276
+ RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1277
+ pooled output) e.g. for GLUE tasks.
1278
+ """,
1279
+ ROBERTA_START_DOCSTRING,
1280
+ )
1281
+ class RobertaForSequenceClassification(RobertaPreTrainedModel):
1282
+ def __init__(self, config):
1283
+ super().__init__(config)
1284
+ self.num_labels = config.num_labels
1285
+ self.config = config
1286
+
1287
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1288
+ self.classifier = RobertaClassificationHead(config)
1289
+
1290
+ # Initialize weights and apply final processing
1291
+ self.post_init()
1292
+
1293
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1294
+ @add_code_sample_docstrings(
1295
+ checkpoint="cardiffnlp/twitter-roberta-base-emotion",
1296
+ output_type=SequenceClassifierOutput,
1297
+ config_class=_CONFIG_FOR_DOC,
1298
+ expected_output="'optimism'",
1299
+ expected_loss=0.08,
1300
+ )
1301
+ def forward(
1302
+ self,
1303
+ input_ids: Optional[torch.LongTensor] = None,
1304
+ attention_mask: Optional[torch.FloatTensor] = None,
1305
+ token_type_ids: Optional[torch.LongTensor] = None,
1306
+ position_ids: Optional[torch.LongTensor] = None,
1307
+ head_mask: Optional[torch.FloatTensor] = None,
1308
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1309
+ labels: Optional[torch.LongTensor] = None,
1310
+ output_attentions: Optional[bool] = None,
1311
+ output_hidden_states: Optional[bool] = None,
1312
+ return_dict: Optional[bool] = None,
1313
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1314
+ r"""
1315
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1316
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1317
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1318
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1319
+ """
1320
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1321
+
1322
+ outputs = self.roberta(
1323
+ input_ids,
1324
+ attention_mask=attention_mask,
1325
+ token_type_ids=token_type_ids,
1326
+ position_ids=position_ids,
1327
+ head_mask=head_mask,
1328
+ inputs_embeds=inputs_embeds,
1329
+ output_attentions=output_attentions,
1330
+ output_hidden_states=output_hidden_states,
1331
+ return_dict=return_dict,
1332
+ )
1333
+ sequence_output = outputs[0]
1334
+ logits = self.classifier(sequence_output)
1335
+
1336
+ loss = None
1337
+ if labels is not None:
1338
+ # move labels to correct device to enable model parallelism
1339
+ labels = labels.to(logits.device)
1340
+ if self.config.problem_type is None:
1341
+ if self.num_labels == 1:
1342
+ self.config.problem_type = "regression"
1343
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1344
+ self.config.problem_type = "single_label_classification"
1345
+ else:
1346
+ self.config.problem_type = "multi_label_classification"
1347
+
1348
+ if self.config.problem_type == "regression":
1349
+ loss_fct = MSELoss()
1350
+ if self.num_labels == 1:
1351
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1352
+ else:
1353
+ loss = loss_fct(logits, labels)
1354
+ elif self.config.problem_type == "single_label_classification":
1355
+ loss_fct = CrossEntropyLoss()
1356
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1357
+ elif self.config.problem_type == "multi_label_classification":
1358
+ loss_fct = BCEWithLogitsLoss()
1359
+ loss = loss_fct(logits, labels)
1360
+
1361
+ if not return_dict:
1362
+ output = (logits,) + outputs[2:]
1363
+ return ((loss,) + output) if loss is not None else output
1364
+
1365
+ return SequenceClassifierOutput(
1366
+ loss=loss,
1367
+ logits=logits,
1368
+ hidden_states=outputs.hidden_states,
1369
+ attentions=outputs.attentions,
1370
+ )
1371
+
1372
+
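The `problem_type` dispatch in the sequence-classification head above reduces to three standard losses. A quick sketch with dummy tensors (shapes and label values are arbitrary assumptions, not model outputs):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

logits = torch.randn(4, 3)  # batch of 4, num_labels = 3

# single_label_classification: integer class ids -> cross-entropy
labels = torch.tensor([0, 2, 1, 2])
loss_single = CrossEntropyLoss()(logits.view(-1, 3), labels.view(-1))

# multi_label_classification: float multi-hot targets -> BCE with logits
multi_hot = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
loss_multi = BCEWithLogitsLoss()(logits, multi_hot)

# regression: num_labels == 1 -> mean-squared error on squeezed logits
loss_reg = MSELoss()(torch.randn(4, 1).squeeze(), torch.randn(4).squeeze())
```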
1373
+ @add_start_docstrings(
1374
+ """
1375
+ Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1376
+ softmax) e.g. for RocStories/SWAG tasks.
1377
+ """,
1378
+ ROBERTA_START_DOCSTRING,
1379
+ )
1380
+ class RobertaForMultipleChoice(RobertaPreTrainedModel):
1381
+ def __init__(self, config):
1382
+ super().__init__(config)
1383
+
1384
+ self.roberta = RobertaModel(config)
1385
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1386
+ self.classifier = nn.Linear(config.hidden_size, 1)
1387
+
1388
+ # Initialize weights and apply final processing
1389
+ self.post_init()
1390
+
1391
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1392
+ @add_code_sample_docstrings(
1393
+ checkpoint=_CHECKPOINT_FOR_DOC,
1394
+ output_type=MultipleChoiceModelOutput,
1395
+ config_class=_CONFIG_FOR_DOC,
1396
+ )
1397
+ def forward(
1398
+ self,
1399
+ input_ids: Optional[torch.LongTensor] = None,
1400
+ token_type_ids: Optional[torch.LongTensor] = None,
1401
+ attention_mask: Optional[torch.FloatTensor] = None,
1402
+ labels: Optional[torch.LongTensor] = None,
1403
+ position_ids: Optional[torch.LongTensor] = None,
1404
+ head_mask: Optional[torch.FloatTensor] = None,
1405
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1406
+ output_attentions: Optional[bool] = None,
1407
+ output_hidden_states: Optional[bool] = None,
1408
+ return_dict: Optional[bool] = None,
1409
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1410
+ r"""
1411
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1412
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1413
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1414
+ `input_ids` above)
1415
+ """
1416
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1417
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1418
+
1419
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1420
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1421
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1422
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1423
+ flat_inputs_embeds = (
1424
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1425
+ if inputs_embeds is not None
1426
+ else None
1427
+ )
1428
+
1429
+ outputs = self.roberta(
1430
+ flat_input_ids,
1431
+ position_ids=flat_position_ids,
1432
+ token_type_ids=flat_token_type_ids,
1433
+ attention_mask=flat_attention_mask,
1434
+ head_mask=head_mask,
1435
+ inputs_embeds=flat_inputs_embeds,
1436
+ output_attentions=output_attentions,
1437
+ output_hidden_states=output_hidden_states,
1438
+ return_dict=return_dict,
1439
+ )
1440
+ pooled_output = outputs[1]
1441
+
1442
+ pooled_output = self.dropout(pooled_output)
1443
+ logits = self.classifier(pooled_output)
1444
+ reshaped_logits = logits.view(-1, num_choices)
1445
+
1446
+ loss = None
1447
+ if labels is not None:
1448
+ # move labels to correct device to enable model parallelism
1449
+ labels = labels.to(reshaped_logits.device)
1450
+ loss_fct = CrossEntropyLoss()
1451
+ loss = loss_fct(reshaped_logits, labels)
1452
+
1453
+ if not return_dict:
1454
+ output = (reshaped_logits,) + outputs[2:]
1455
+ return ((loss,) + output) if loss is not None else output
1456
+
1457
+ return MultipleChoiceModelOutput(
1458
+ loss=loss,
1459
+ logits=reshaped_logits,
1460
+ hidden_states=outputs.hidden_states,
1461
+ attentions=outputs.attentions,
1462
+ )
1463
+
1464
+
1465
+ @add_start_docstrings(
1466
+ """
1467
+ Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1468
+ Named-Entity-Recognition (NER) tasks.
1469
+ """,
1470
+ ROBERTA_START_DOCSTRING,
1471
+ )
1472
+ class RobertaForTokenClassification(RobertaPreTrainedModel):
1473
+ def __init__(self, config):
1474
+ super().__init__(config)
1475
+ self.num_labels = config.num_labels
1476
+
1477
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1478
+ classifier_dropout = (
1479
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1480
+ )
1481
+ self.dropout = nn.Dropout(classifier_dropout)
1482
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1483
+
1484
+ # Initialize weights and apply final processing
1485
+ self.post_init()
1486
+
1487
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1488
+ @add_code_sample_docstrings(
1489
+ checkpoint="Jean-Baptiste/roberta-large-ner-english",
1490
+ output_type=TokenClassifierOutput,
1491
+ config_class=_CONFIG_FOR_DOC,
1492
+ expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']",
1493
+ expected_loss=0.01,
1494
+ )
1495
+ def forward(
1496
+ self,
1497
+ input_ids: Optional[torch.LongTensor] = None,
1498
+ attention_mask: Optional[torch.FloatTensor] = None,
1499
+ token_type_ids: Optional[torch.LongTensor] = None,
1500
+ position_ids: Optional[torch.LongTensor] = None,
1501
+ head_mask: Optional[torch.FloatTensor] = None,
1502
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1503
+ labels: Optional[torch.LongTensor] = None,
1504
+ output_attentions: Optional[bool] = None,
1505
+ output_hidden_states: Optional[bool] = None,
1506
+ return_dict: Optional[bool] = None,
1507
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1508
+ r"""
1509
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1510
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1511
+ """
1512
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1513
+
1514
+ outputs = self.roberta(
1515
+ input_ids,
1516
+ attention_mask=attention_mask,
1517
+ token_type_ids=token_type_ids,
1518
+ position_ids=position_ids,
1519
+ head_mask=head_mask,
1520
+ inputs_embeds=inputs_embeds,
1521
+ output_attentions=output_attentions,
1522
+ output_hidden_states=output_hidden_states,
1523
+ return_dict=return_dict,
1524
+ )
1525
+
1526
+ sequence_output = outputs[0]
1527
+
1528
+ sequence_output = self.dropout(sequence_output)
1529
+ logits = self.classifier(sequence_output)
1530
+
1531
+ loss = None
1532
+ if labels is not None:
1533
+ # move labels to correct device to enable model parallelism
1534
+ labels = labels.to(logits.device)
1535
+ loss_fct = CrossEntropyLoss()
1536
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1537
+
1538
+ if not return_dict:
1539
+ output = (logits,) + outputs[2:]
1540
+ return ((loss,) + output) if loss is not None else output
1541
+
1542
+ return TokenClassifierOutput(
1543
+ loss=loss,
1544
+ logits=logits,
1545
+ hidden_states=outputs.hidden_states,
1546
+ attentions=outputs.attentions,
1547
+ )
1548
+
1549
+
1550
+ class RobertaClassificationHead(nn.Module):
1551
+ """Head for sentence-level classification tasks."""
1552
+
1553
+ def __init__(self, config):
1554
+ super().__init__()
1555
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1556
+ classifier_dropout = (
1557
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1558
+ )
1559
+ self.dropout = nn.Dropout(classifier_dropout)
1560
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1561
+
1562
+ def forward(self, features, **kwargs):
1563
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1564
+ x = self.dropout(x)
1565
+ x = self.dense(x)
1566
+ x = torch.tanh(x)
1567
+ x = self.dropout(x)
1568
+ x = self.out_proj(x)
1569
+ return x
1570
+
1571
+
1572
+ @add_start_docstrings(
1573
+ """
1574
+ Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1575
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1576
+ """,
1577
+ ROBERTA_START_DOCSTRING,
1578
+ )
1579
+ class RobertaForQuestionAnswering(RobertaPreTrainedModel):
1580
+ def __init__(self, config):
1581
+ super().__init__(config)
1582
+ self.num_labels = config.num_labels
1583
+
1584
+ self.roberta = RobertaModel(config, add_pooling_layer=False)
1585
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1586
+
1587
+ # Initialize weights and apply final processing
1588
+ self.post_init()
1589
+
1590
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1591
+ @add_code_sample_docstrings(
1592
+ checkpoint="deepset/roberta-base-squad2",
1593
+ output_type=QuestionAnsweringModelOutput,
1594
+ config_class=_CONFIG_FOR_DOC,
1595
+ expected_output="' puppet'",
1596
+ expected_loss=0.86,
1597
+ )
1598
+ def forward(
1599
+ self,
1600
+ input_ids: Optional[torch.LongTensor] = None,
1601
+ attention_mask: Optional[torch.FloatTensor] = None,
1602
+ token_type_ids: Optional[torch.LongTensor] = None,
1603
+ position_ids: Optional[torch.LongTensor] = None,
1604
+ head_mask: Optional[torch.FloatTensor] = None,
1605
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1606
+ start_positions: Optional[torch.LongTensor] = None,
1607
+ end_positions: Optional[torch.LongTensor] = None,
1608
+ output_attentions: Optional[bool] = None,
1609
+ output_hidden_states: Optional[bool] = None,
1610
+ return_dict: Optional[bool] = None,
1611
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1612
+ r"""
1613
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1614
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1615
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1616
+ are not taken into account for computing the loss.
1617
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1618
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1619
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1620
+ are not taken into account for computing the loss.
1621
+ """
1622
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1623
+
1624
+ outputs = self.roberta(
1625
+ input_ids,
1626
+ attention_mask=attention_mask,
1627
+ token_type_ids=token_type_ids,
1628
+ position_ids=position_ids,
1629
+ head_mask=head_mask,
1630
+ inputs_embeds=inputs_embeds,
1631
+ output_attentions=output_attentions,
1632
+ output_hidden_states=output_hidden_states,
1633
+ return_dict=return_dict,
1634
+ )
1635
+
1636
+ sequence_output = outputs[0]
1637
+
1638
+ logits = self.qa_outputs(sequence_output)
1639
+ start_logits, end_logits = logits.split(1, dim=-1)
1640
+ start_logits = start_logits.squeeze(-1).contiguous()
1641
+ end_logits = end_logits.squeeze(-1).contiguous()
1642
+
1643
+ total_loss = None
1644
+ if start_positions is not None and end_positions is not None:
1645
+ # If we are on multi-GPU, split adds a dimension
1646
+ if len(start_positions.size()) > 1:
1647
+ start_positions = start_positions.squeeze(-1)
1648
+ if len(end_positions.size()) > 1:
1649
+ end_positions = end_positions.squeeze(-1)
1650
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1651
+ ignored_index = start_logits.size(1)
1652
+ start_positions = start_positions.clamp(0, ignored_index)
1653
+ end_positions = end_positions.clamp(0, ignored_index)
1654
+
1655
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1656
+ start_loss = loss_fct(start_logits, start_positions)
1657
+ end_loss = loss_fct(end_logits, end_positions)
1658
+ total_loss = (start_loss + end_loss) / 2
1659
+
1660
+ if not return_dict:
1661
+ output = (start_logits, end_logits) + outputs[2:]
1662
+ return ((total_loss,) + output) if total_loss is not None else output
1663
+
1664
+ return QuestionAnsweringModelOutput(
1665
+ loss=total_loss,
1666
+ start_logits=start_logits,
1667
+ end_logits=end_logits,
1668
+ hidden_states=outputs.hidden_states,
1669
+ attentions=outputs.attentions,
1670
+ )
1671
+
1672
+
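The span head above is just a single `Linear(hidden_size, 2)` whose two output channels are split into start and end logits. A standalone sketch of that split with arbitrary toy sizes:

```python
import torch

sequence_output = torch.randn(1, 7, 4)        # [batch, seq_len, hidden] with toy sizes
qa_outputs = torch.nn.Linear(4, 2)            # same shape as the head above (num_labels = 2)

logits = qa_outputs(sequence_output)          # [1, 7, 2]
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)       # [1, 7]
end_logits = end_logits.squeeze(-1)           # [1, 7]

# the predicted answer span is the argmax of each set of logits
answer_start = start_logits.argmax(dim=-1)
answer_end = end_logits.argmax(dim=-1)
```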
1673
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1674
+ """
1675
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1676
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1677
+
1678
+ Args:
1679
+ input_ids: torch.Tensor
1680
+
1681
+ Returns: torch.Tensor
1682
+ """
1683
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1684
+ mask = input_ids.ne(padding_idx).int()
1685
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1686
+ return incremental_indices.long() + padding_idx
1687
+
1688
+
1689
+ __all__ = [
1690
+ "RobertaForCausalLM",
1691
+ "RobertaForMaskedLM",
1692
+ "RobertaForMultipleChoice",
1693
+ "RobertaForQuestionAnswering",
1694
+ "RobertaForSequenceClassification",
1695
+ "RobertaForTokenClassification",
1696
+ "RobertaModel",
1697
+ "RobertaPreTrainedModel",
1698
+ ]
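To make the padding-aware position numbering concrete, here is the same helper exercised on a tiny batch (token ids chosen only for illustration; RoBERTa's `padding_idx` is 1):

```python
import torch

def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    # identical logic to the helper above: real tokens count up from padding_idx + 1, pads stay put
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # last two positions are padding
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 5, 1, 1]]) -> real tokens get positions 2..5, padding keeps position 1
```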
docs/transformers/build/lib/transformers/models/roberta/modeling_tf_roberta.py ADDED
@@ -0,0 +1,1783 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """TF 2.0 RoBERTa model."""
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ import warnings
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import numpy as np
25
+ import tensorflow as tf
26
+
27
+ from ...activations_tf import get_tf_activation
28
+ from ...modeling_tf_outputs import (
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
31
+ TFCausalLMOutputWithCrossAttentions,
32
+ TFMaskedLMOutput,
33
+ TFMultipleChoiceModelOutput,
34
+ TFQuestionAnsweringModelOutput,
35
+ TFSequenceClassifierOutput,
36
+ TFTokenClassifierOutput,
37
+ )
38
+ from ...modeling_tf_utils import (
39
+ TFCausalLanguageModelingLoss,
40
+ TFMaskedLanguageModelingLoss,
41
+ TFModelInputType,
42
+ TFMultipleChoiceLoss,
43
+ TFPreTrainedModel,
44
+ TFQuestionAnsweringLoss,
45
+ TFSequenceClassificationLoss,
46
+ TFTokenClassificationLoss,
47
+ get_initializer,
48
+ keras,
49
+ keras_serializable,
50
+ unpack_inputs,
51
+ )
52
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
53
+ from ...utils import (
54
+ add_code_sample_docstrings,
55
+ add_start_docstrings,
56
+ add_start_docstrings_to_model_forward,
57
+ logging,
58
+ )
59
+ from .configuration_roberta import RobertaConfig
60
+
61
+
62
+ logger = logging.get_logger(__name__)
63
+
64
+ _CHECKPOINT_FOR_DOC = "FacebookAI/roberta-base"
65
+ _CONFIG_FOR_DOC = "RobertaConfig"
66
+
67
+
68
+ class TFRobertaEmbeddings(keras.layers.Layer):
69
+ """
70
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
71
+ """
72
+
73
+ def __init__(self, config, **kwargs):
74
+ super().__init__(**kwargs)
75
+
76
+ self.padding_idx = 1
77
+ self.config = config
78
+ self.hidden_size = config.hidden_size
79
+ self.max_position_embeddings = config.max_position_embeddings
80
+ self.initializer_range = config.initializer_range
81
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
82
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
83
+
84
+ def build(self, input_shape=None):
85
+ with tf.name_scope("word_embeddings"):
86
+ self.weight = self.add_weight(
87
+ name="weight",
88
+ shape=[self.config.vocab_size, self.hidden_size],
89
+ initializer=get_initializer(self.initializer_range),
90
+ )
91
+
92
+ with tf.name_scope("token_type_embeddings"):
93
+ self.token_type_embeddings = self.add_weight(
94
+ name="embeddings",
95
+ shape=[self.config.type_vocab_size, self.hidden_size],
96
+ initializer=get_initializer(self.initializer_range),
97
+ )
98
+
99
+ with tf.name_scope("position_embeddings"):
100
+ self.position_embeddings = self.add_weight(
101
+ name="embeddings",
102
+ shape=[self.max_position_embeddings, self.hidden_size],
103
+ initializer=get_initializer(self.initializer_range),
104
+ )
105
+
106
+ if self.built:
107
+ return
108
+ self.built = True
109
+ if getattr(self, "LayerNorm", None) is not None:
110
+ with tf.name_scope(self.LayerNorm.name):
111
+ self.LayerNorm.build([None, None, self.config.hidden_size])
112
+
113
+ def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
114
+ """
115
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
116
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
117
+
118
+ Args:
119
+ input_ids: tf.Tensor
120
+ Returns: tf.Tensor
121
+ """
122
+ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
123
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
124
+
125
+ return incremental_indices + self.padding_idx
126
+
127
+ def call(
128
+ self,
129
+ input_ids=None,
130
+ position_ids=None,
131
+ token_type_ids=None,
132
+ inputs_embeds=None,
133
+ past_key_values_length=0,
134
+ training=False,
135
+ ):
136
+ """
137
+ Applies embedding based on inputs tensor.
138
+
139
+ Returns:
140
+ final_embeddings (`tf.Tensor`): output embedding tensor.
141
+ """
142
+ assert not (input_ids is None and inputs_embeds is None)
143
+
144
+ if input_ids is not None:
145
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
146
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
147
+
148
+ input_shape = shape_list(inputs_embeds)[:-1]
149
+
150
+ if token_type_ids is None:
151
+ token_type_ids = tf.fill(dims=input_shape, value=0)
152
+
153
+ if position_ids is None:
154
+ if input_ids is not None:
155
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
156
+ position_ids = self.create_position_ids_from_input_ids(
157
+ input_ids=input_ids, past_key_values_length=past_key_values_length
158
+ )
159
+ else:
160
+ position_ids = tf.expand_dims(
161
+ tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
162
+ )
163
+
164
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
165
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
166
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
167
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
168
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
169
+
170
+ return final_embeddings
171
+
172
+
173
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Roberta
174
+ class TFRobertaPooler(keras.layers.Layer):
175
+ def __init__(self, config: RobertaConfig, **kwargs):
176
+ super().__init__(**kwargs)
177
+
178
+ self.dense = keras.layers.Dense(
179
+ units=config.hidden_size,
180
+ kernel_initializer=get_initializer(config.initializer_range),
181
+ activation="tanh",
182
+ name="dense",
183
+ )
184
+ self.config = config
185
+
186
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
187
+ # We "pool" the model by simply taking the hidden state corresponding
188
+ # to the first token.
189
+ first_token_tensor = hidden_states[:, 0]
190
+ pooled_output = self.dense(inputs=first_token_tensor)
191
+
192
+ return pooled_output
193
+
194
+ def build(self, input_shape=None):
195
+ if self.built:
196
+ return
197
+ self.built = True
198
+ if getattr(self, "dense", None) is not None:
199
+ with tf.name_scope(self.dense.name):
200
+ self.dense.build([None, None, self.config.hidden_size])
201
+
202
+
203
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Roberta
204
+ class TFRobertaSelfAttention(keras.layers.Layer):
205
+ def __init__(self, config: RobertaConfig, **kwargs):
206
+ super().__init__(**kwargs)
207
+
208
+ if config.hidden_size % config.num_attention_heads != 0:
209
+ raise ValueError(
210
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
211
+ f"of attention heads ({config.num_attention_heads})"
212
+ )
213
+
214
+ self.num_attention_heads = config.num_attention_heads
215
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
216
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
217
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
218
+
219
+ self.query = keras.layers.Dense(
220
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
221
+ )
222
+ self.key = keras.layers.Dense(
223
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
224
+ )
225
+ self.value = keras.layers.Dense(
226
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
227
+ )
228
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
229
+
230
+ self.is_decoder = config.is_decoder
231
+ self.config = config
232
+
233
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
234
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
235
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
236
+
237
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
238
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
239
+
240
+ def call(
241
+ self,
242
+ hidden_states: tf.Tensor,
243
+ attention_mask: tf.Tensor,
244
+ head_mask: tf.Tensor,
245
+ encoder_hidden_states: tf.Tensor,
246
+ encoder_attention_mask: tf.Tensor,
247
+ past_key_value: Tuple[tf.Tensor],
248
+ output_attentions: bool,
249
+ training: bool = False,
250
+ ) -> Tuple[tf.Tensor]:
251
+ batch_size = shape_list(hidden_states)[0]
252
+ mixed_query_layer = self.query(inputs=hidden_states)
253
+
254
+ # If this is instantiated as a cross-attention module, the keys
255
+ # and values come from an encoder; the attention mask needs to be
256
+ # such that the encoder's padding tokens are not attended to.
257
+ is_cross_attention = encoder_hidden_states is not None
258
+
259
+ if is_cross_attention and past_key_value is not None:
260
+ # reuse k,v, cross_attentions
261
+ key_layer = past_key_value[0]
262
+ value_layer = past_key_value[1]
263
+ attention_mask = encoder_attention_mask
264
+ elif is_cross_attention:
265
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
266
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
267
+ attention_mask = encoder_attention_mask
268
+ elif past_key_value is not None:
269
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
270
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
271
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
272
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
273
+ else:
274
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
275
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
276
+
277
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
278
+
279
+ if self.is_decoder:
280
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
281
+ # Further calls to cross_attention layer can then reuse all cross-attention
282
+ # key/value_states (first "if" case)
283
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
284
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
285
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
286
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
287
+ past_key_value = (key_layer, value_layer)
288
+
289
+ # Take the dot product between "query" and "key" to get the raw attention scores.
290
+ # (batch size, num_heads, seq_len_q, seq_len_k)
291
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
292
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
293
+ attention_scores = tf.divide(attention_scores, dk)
294
+
295
+ if attention_mask is not None:
296
+ # Apply the attention mask is (precomputed for all layers in TFRobertaModel call() function)
297
+ attention_scores = tf.add(attention_scores, attention_mask)
298
+
299
+ # Normalize the attention scores to probabilities.
300
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
301
+
302
+ # This is actually dropping out entire tokens to attend to, which might
303
+ # seem a bit unusual, but is taken from the original Transformer paper.
304
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
305
+
306
+ # Mask heads if we want to
307
+ if head_mask is not None:
308
+ attention_probs = tf.multiply(attention_probs, head_mask)
309
+
310
+ attention_output = tf.matmul(attention_probs, value_layer)
311
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
312
+
313
+ # (batch_size, seq_len_q, all_head_size)
314
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
315
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
316
+
317
+ if self.is_decoder:
318
+ outputs = outputs + (past_key_value,)
319
+ return outputs
320
+
321
+ def build(self, input_shape=None):
322
+ if self.built:
323
+ return
324
+ self.built = True
325
+ if getattr(self, "query", None) is not None:
326
+ with tf.name_scope(self.query.name):
327
+ self.query.build([None, None, self.config.hidden_size])
328
+ if getattr(self, "key", None) is not None:
329
+ with tf.name_scope(self.key.name):
330
+ self.key.build([None, None, self.config.hidden_size])
331
+ if getattr(self, "value", None) is not None:
332
+ with tf.name_scope(self.value.name):
333
+ self.value.build([None, None, self.config.hidden_size])
334
+
335
+
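`transpose_for_scores` above only rearranges dimensions so the attention matmul runs per head. A standalone TensorFlow sketch of the shape flow (sizes are arbitrary assumptions, not tied to any config):

```python
import tensorflow as tf

batch_size, seq_len, num_heads, head_size = 2, 5, 12, 64
hidden = tf.random.normal((batch_size, seq_len, num_heads * head_size))

# [batch, seq, all_head_size] -> [batch, seq, heads, head_size] -> [batch, heads, seq, head_size]
split = tf.reshape(hidden, (batch_size, -1, num_heads, head_size))
per_head = tf.transpose(split, perm=[0, 2, 1, 3])
print(per_head.shape)  # (2, 12, 5, 64)
```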
336
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Roberta
337
+ class TFRobertaSelfOutput(keras.layers.Layer):
338
+ def __init__(self, config: RobertaConfig, **kwargs):
339
+ super().__init__(**kwargs)
340
+
341
+ self.dense = keras.layers.Dense(
342
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
343
+ )
344
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
345
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
346
+ self.config = config
347
+
348
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
349
+ hidden_states = self.dense(inputs=hidden_states)
350
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
351
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
352
+
353
+ return hidden_states
354
+
355
+ def build(self, input_shape=None):
356
+ if self.built:
357
+ return
358
+ self.built = True
359
+ if getattr(self, "dense", None) is not None:
360
+ with tf.name_scope(self.dense.name):
361
+ self.dense.build([None, None, self.config.hidden_size])
362
+ if getattr(self, "LayerNorm", None) is not None:
363
+ with tf.name_scope(self.LayerNorm.name):
364
+ self.LayerNorm.build([None, None, self.config.hidden_size])
365
+
366
+
367
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Roberta
368
+ class TFRobertaAttention(keras.layers.Layer):
369
+ def __init__(self, config: RobertaConfig, **kwargs):
370
+ super().__init__(**kwargs)
371
+
372
+ self.self_attention = TFRobertaSelfAttention(config, name="self")
373
+ self.dense_output = TFRobertaSelfOutput(config, name="output")
374
+
375
+ def prune_heads(self, heads):
376
+ raise NotImplementedError
377
+
378
+ def call(
379
+ self,
380
+ input_tensor: tf.Tensor,
381
+ attention_mask: tf.Tensor,
382
+ head_mask: tf.Tensor,
383
+ encoder_hidden_states: tf.Tensor,
384
+ encoder_attention_mask: tf.Tensor,
385
+ past_key_value: Tuple[tf.Tensor],
386
+ output_attentions: bool,
387
+ training: bool = False,
388
+ ) -> Tuple[tf.Tensor]:
389
+ self_outputs = self.self_attention(
390
+ hidden_states=input_tensor,
391
+ attention_mask=attention_mask,
392
+ head_mask=head_mask,
393
+ encoder_hidden_states=encoder_hidden_states,
394
+ encoder_attention_mask=encoder_attention_mask,
395
+ past_key_value=past_key_value,
396
+ output_attentions=output_attentions,
397
+ training=training,
398
+ )
399
+ attention_output = self.dense_output(
400
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
401
+ )
402
+ # add attentions (possibly with past_key_value) if we output them
403
+ outputs = (attention_output,) + self_outputs[1:]
404
+
405
+ return outputs
406
+
407
+ def build(self, input_shape=None):
408
+ if self.built:
409
+ return
410
+ self.built = True
411
+ if getattr(self, "self_attention", None) is not None:
412
+ with tf.name_scope(self.self_attention.name):
413
+ self.self_attention.build(None)
414
+ if getattr(self, "dense_output", None) is not None:
415
+ with tf.name_scope(self.dense_output.name):
416
+ self.dense_output.build(None)
417
+
418
+
419
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Roberta
420
+ class TFRobertaIntermediate(keras.layers.Layer):
421
+ def __init__(self, config: RobertaConfig, **kwargs):
422
+ super().__init__(**kwargs)
423
+
424
+ self.dense = keras.layers.Dense(
425
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
426
+ )
427
+
428
+ if isinstance(config.hidden_act, str):
429
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
430
+ else:
431
+ self.intermediate_act_fn = config.hidden_act
432
+ self.config = config
433
+
434
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
435
+ hidden_states = self.dense(inputs=hidden_states)
436
+ hidden_states = self.intermediate_act_fn(hidden_states)
437
+
438
+ return hidden_states
439
+
440
+ def build(self, input_shape=None):
441
+ if self.built:
442
+ return
443
+ self.built = True
444
+ if getattr(self, "dense", None) is not None:
445
+ with tf.name_scope(self.dense.name):
446
+ self.dense.build([None, None, self.config.hidden_size])
447
+
448
+
449
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Roberta
450
+ class TFRobertaOutput(keras.layers.Layer):
451
+ def __init__(self, config: RobertaConfig, **kwargs):
452
+ super().__init__(**kwargs)
453
+
454
+ self.dense = keras.layers.Dense(
455
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
456
+ )
457
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
458
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
459
+ self.config = config
460
+
461
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
462
+ hidden_states = self.dense(inputs=hidden_states)
463
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
464
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
465
+
466
+ return hidden_states
467
+
468
+ def build(self, input_shape=None):
469
+ if self.built:
470
+ return
471
+ self.built = True
472
+ if getattr(self, "dense", None) is not None:
473
+ with tf.name_scope(self.dense.name):
474
+ self.dense.build([None, None, self.config.intermediate_size])
475
+ if getattr(self, "LayerNorm", None) is not None:
476
+ with tf.name_scope(self.LayerNorm.name):
477
+ self.LayerNorm.build([None, None, self.config.hidden_size])
478
+
479
+
480
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Roberta
481
+ class TFRobertaLayer(keras.layers.Layer):
482
+ def __init__(self, config: RobertaConfig, **kwargs):
483
+ super().__init__(**kwargs)
484
+
485
+ self.attention = TFRobertaAttention(config, name="attention")
486
+ self.is_decoder = config.is_decoder
487
+ self.add_cross_attention = config.add_cross_attention
488
+ if self.add_cross_attention:
489
+ if not self.is_decoder:
490
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
491
+ self.crossattention = TFRobertaAttention(config, name="crossattention")
492
+ self.intermediate = TFRobertaIntermediate(config, name="intermediate")
493
+ self.bert_output = TFRobertaOutput(config, name="output")
494
+
495
+ def call(
496
+ self,
497
+ hidden_states: tf.Tensor,
498
+ attention_mask: tf.Tensor,
499
+ head_mask: tf.Tensor,
500
+ encoder_hidden_states: tf.Tensor | None,
501
+ encoder_attention_mask: tf.Tensor | None,
502
+ past_key_value: Tuple[tf.Tensor] | None,
503
+ output_attentions: bool,
504
+ training: bool = False,
505
+ ) -> Tuple[tf.Tensor]:
506
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
507
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
508
+ self_attention_outputs = self.attention(
509
+ input_tensor=hidden_states,
510
+ attention_mask=attention_mask,
511
+ head_mask=head_mask,
512
+ encoder_hidden_states=None,
513
+ encoder_attention_mask=None,
514
+ past_key_value=self_attn_past_key_value,
515
+ output_attentions=output_attentions,
516
+ training=training,
517
+ )
518
+ attention_output = self_attention_outputs[0]
519
+
520
+ # if decoder, the last output is tuple of self-attn cache
521
+ if self.is_decoder:
522
+ outputs = self_attention_outputs[1:-1]
523
+ present_key_value = self_attention_outputs[-1]
524
+ else:
525
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
526
+
527
+ cross_attn_present_key_value = None
528
+ if self.is_decoder and encoder_hidden_states is not None:
529
+ if not hasattr(self, "crossattention"):
530
+ raise ValueError(
531
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
532
+ " by setting `config.add_cross_attention=True`"
533
+ )
534
+
535
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
536
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
537
+ cross_attention_outputs = self.crossattention(
538
+ input_tensor=attention_output,
539
+ attention_mask=attention_mask,
540
+ head_mask=head_mask,
541
+ encoder_hidden_states=encoder_hidden_states,
542
+ encoder_attention_mask=encoder_attention_mask,
543
+ past_key_value=cross_attn_past_key_value,
544
+ output_attentions=output_attentions,
545
+ training=training,
546
+ )
547
+ attention_output = cross_attention_outputs[0]
548
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
549
+
550
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
551
+ cross_attn_present_key_value = cross_attention_outputs[-1]
552
+ present_key_value = present_key_value + cross_attn_present_key_value
553
+
554
+ intermediate_output = self.intermediate(hidden_states=attention_output)
555
+ layer_output = self.bert_output(
556
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
557
+ )
558
+ outputs = (layer_output,) + outputs # add attentions if we output them
559
+
560
+ # if decoder, return the attn key/values as the last output
561
+ if self.is_decoder:
562
+ outputs = outputs + (present_key_value,)
563
+
564
+ return outputs
565
+
566
+ def build(self, input_shape=None):
567
+ if self.built:
568
+ return
569
+ self.built = True
570
+ if getattr(self, "attention", None) is not None:
571
+ with tf.name_scope(self.attention.name):
572
+ self.attention.build(None)
573
+ if getattr(self, "intermediate", None) is not None:
574
+ with tf.name_scope(self.intermediate.name):
575
+ self.intermediate.build(None)
576
+ if getattr(self, "bert_output", None) is not None:
577
+ with tf.name_scope(self.bert_output.name):
578
+ self.bert_output.build(None)
579
+ if getattr(self, "crossattention", None) is not None:
580
+ with tf.name_scope(self.crossattention.name):
581
+ self.crossattention.build(None)
582
+
583
+
584
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Roberta
585
+ class TFRobertaEncoder(keras.layers.Layer):
586
+ def __init__(self, config: RobertaConfig, **kwargs):
587
+ super().__init__(**kwargs)
588
+ self.config = config
589
+ self.layer = [TFRobertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
590
+
591
+ def call(
592
+ self,
593
+ hidden_states: tf.Tensor,
594
+ attention_mask: tf.Tensor,
595
+ head_mask: tf.Tensor,
596
+ encoder_hidden_states: tf.Tensor | None,
597
+ encoder_attention_mask: tf.Tensor | None,
598
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
599
+ use_cache: Optional[bool],
600
+ output_attentions: bool,
601
+ output_hidden_states: bool,
602
+ return_dict: bool,
603
+ training: bool = False,
604
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
605
+ all_hidden_states = () if output_hidden_states else None
606
+ all_attentions = () if output_attentions else None
607
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
608
+
609
+ next_decoder_cache = () if use_cache else None
610
+ for i, layer_module in enumerate(self.layer):
611
+ if output_hidden_states:
612
+ all_hidden_states = all_hidden_states + (hidden_states,)
613
+
614
+ past_key_value = past_key_values[i] if past_key_values is not None else None
615
+
616
+ layer_outputs = layer_module(
617
+ hidden_states=hidden_states,
618
+ attention_mask=attention_mask,
619
+ head_mask=head_mask[i],
620
+ encoder_hidden_states=encoder_hidden_states,
621
+ encoder_attention_mask=encoder_attention_mask,
622
+ past_key_value=past_key_value,
623
+ output_attentions=output_attentions,
624
+ training=training,
625
+ )
626
+ hidden_states = layer_outputs[0]
627
+
628
+ if use_cache:
629
+ next_decoder_cache += (layer_outputs[-1],)
630
+
631
+ if output_attentions:
632
+ all_attentions = all_attentions + (layer_outputs[1],)
633
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
634
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
635
+
636
+ # Add last layer
637
+ if output_hidden_states:
638
+ all_hidden_states = all_hidden_states + (hidden_states,)
639
+
640
+ if not return_dict:
641
+ return tuple(
642
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
643
+ )
644
+
645
+ return TFBaseModelOutputWithPastAndCrossAttentions(
646
+ last_hidden_state=hidden_states,
647
+ past_key_values=next_decoder_cache,
648
+ hidden_states=all_hidden_states,
649
+ attentions=all_attentions,
650
+ cross_attentions=all_cross_attentions,
651
+ )
652
+
653
+ def build(self, input_shape=None):
654
+ if self.built:
655
+ return
656
+ self.built = True
657
+ if getattr(self, "layer", None) is not None:
658
+ for layer in self.layer:
659
+ with tf.name_scope(layer.name):
660
+ layer.build(None)
661
+
662
+
663
+ @keras_serializable
664
+ class TFRobertaMainLayer(keras.layers.Layer):
665
+ config_class = RobertaConfig
666
+
667
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
668
+ super().__init__(**kwargs)
669
+
670
+ self.config = config
671
+ self.is_decoder = config.is_decoder
672
+
673
+ self.num_hidden_layers = config.num_hidden_layers
674
+ self.initializer_range = config.initializer_range
675
+ self.output_attentions = config.output_attentions
676
+ self.output_hidden_states = config.output_hidden_states
677
+ self.return_dict = config.use_return_dict
678
+ self.encoder = TFRobertaEncoder(config, name="encoder")
679
+ self.pooler = TFRobertaPooler(config, name="pooler") if add_pooling_layer else None
680
+ # The embeddings must be the last declaration in order to follow the weights order
681
+ self.embeddings = TFRobertaEmbeddings(config, name="embeddings")
682
+
683
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
684
+ def get_input_embeddings(self) -> keras.layers.Layer:
685
+ return self.embeddings
686
+
687
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
688
+ def set_input_embeddings(self, value: tf.Variable):
689
+ self.embeddings.weight = value
690
+ self.embeddings.vocab_size = shape_list(value)[0]
691
+
692
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
693
+ def _prune_heads(self, heads_to_prune):
694
+ """
695
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
696
+ class PreTrainedModel
697
+ """
698
+ raise NotImplementedError
699
+
700
+ @unpack_inputs
701
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call
702
+ def call(
703
+ self,
704
+ input_ids: TFModelInputType | None = None,
705
+ attention_mask: np.ndarray | tf.Tensor | None = None,
706
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
707
+ position_ids: np.ndarray | tf.Tensor | None = None,
708
+ head_mask: np.ndarray | tf.Tensor | None = None,
709
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
710
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
711
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
712
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
713
+ use_cache: Optional[bool] = None,
714
+ output_attentions: Optional[bool] = None,
715
+ output_hidden_states: Optional[bool] = None,
716
+ return_dict: Optional[bool] = None,
717
+ training: bool = False,
718
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
719
+ if not self.config.is_decoder:
720
+ use_cache = False
721
+
722
+ if input_ids is not None and inputs_embeds is not None:
723
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
724
+ elif input_ids is not None:
725
+ input_shape = shape_list(input_ids)
726
+ elif inputs_embeds is not None:
727
+ input_shape = shape_list(inputs_embeds)[:-1]
728
+ else:
729
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
730
+
731
+ batch_size, seq_length = input_shape
732
+
733
+ if past_key_values is None:
734
+ past_key_values_length = 0
735
+ past_key_values = [None] * len(self.encoder.layer)
736
+ else:
737
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
738
+
739
+ if attention_mask is None:
740
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
741
+
742
+ if token_type_ids is None:
743
+ token_type_ids = tf.fill(dims=input_shape, value=0)
744
+
745
+ embedding_output = self.embeddings(
746
+ input_ids=input_ids,
747
+ position_ids=position_ids,
748
+ token_type_ids=token_type_ids,
749
+ inputs_embeds=inputs_embeds,
750
+ past_key_values_length=past_key_values_length,
751
+ training=training,
752
+ )
753
+
754
+ # We create a 3D attention mask from a 2D tensor mask.
755
+ # Sizes are [batch_size, 1, 1, to_seq_length]
756
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
757
+ # this attention mask is more simple than the triangular masking of causal attention
758
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
759
+ attention_mask_shape = shape_list(attention_mask)
760
+
761
+ mask_seq_length = seq_length + past_key_values_length
762
+ # Copied from `modeling_tf_t5.py`
763
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
764
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
765
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
766
+ if self.is_decoder:
767
+ seq_ids = tf.range(mask_seq_length)
768
+ causal_mask = tf.less_equal(
769
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
770
+ seq_ids[None, :, None],
771
+ )
772
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
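+ # e.g. with mask_seq_length == 3, each batch entry of `causal_mask` is
+ # [[1, 0, 0], [1, 1, 0], [1, 1, 1]]: position i may only attend to positions j <= i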
773
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
774
+ attention_mask_shape = shape_list(extended_attention_mask)
775
+ extended_attention_mask = tf.reshape(
776
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
777
+ )
778
+ if past_key_values[0] is not None:
779
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]
780
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
781
+ else:
782
+ extended_attention_mask = tf.reshape(
783
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
784
+ )
785
+
786
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
787
+ # masked positions, this operation will create a tensor which is 0.0 for
788
+ # positions we want to attend and -10000.0 for masked positions.
789
+ # Since we are adding it to the raw scores before the softmax, this is
790
+ # effectively the same as removing these entirely.
791
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
792
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
793
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
794
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
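+ # e.g. attention_mask == [1, 1, 0] becomes [0.0, 0.0, -10000.0]; once added to the raw
+ # attention scores, the padded position is effectively removed by the softmax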
795
+
796
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
797
+ if self.is_decoder and encoder_attention_mask is not None:
798
+ # If a 2D or 3D attention mask is provided for the cross-attention
799
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
801
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
802
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
803
+ if num_dims_encoder_attention_mask == 3:
804
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
805
+ if num_dims_encoder_attention_mask == 2:
806
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
807
+
808
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
809
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
810
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
811
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
812
+
813
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
814
+ else:
815
+ encoder_extended_attention_mask = None
816
+
817
+ # Prepare head mask if needed
818
+ # 1.0 in head_mask indicate we keep the head
819
+ # attention_probs has shape bsz x n_heads x N x N
820
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
821
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
822
+ if head_mask is not None:
823
+ raise NotImplementedError
824
+ else:
825
+ head_mask = [None] * self.config.num_hidden_layers
826
+
827
+ encoder_outputs = self.encoder(
828
+ hidden_states=embedding_output,
829
+ attention_mask=extended_attention_mask,
830
+ head_mask=head_mask,
831
+ encoder_hidden_states=encoder_hidden_states,
832
+ encoder_attention_mask=encoder_extended_attention_mask,
833
+ past_key_values=past_key_values,
834
+ use_cache=use_cache,
835
+ output_attentions=output_attentions,
836
+ output_hidden_states=output_hidden_states,
837
+ return_dict=return_dict,
838
+ training=training,
839
+ )
840
+
841
+ sequence_output = encoder_outputs[0]
842
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
843
+
844
+ if not return_dict:
845
+ return (
846
+ sequence_output,
847
+ pooled_output,
848
+ ) + encoder_outputs[1:]
849
+
850
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
851
+ last_hidden_state=sequence_output,
852
+ pooler_output=pooled_output,
853
+ past_key_values=encoder_outputs.past_key_values,
854
+ hidden_states=encoder_outputs.hidden_states,
855
+ attentions=encoder_outputs.attentions,
856
+ cross_attentions=encoder_outputs.cross_attentions,
857
+ )
858
+
859
+ def build(self, input_shape=None):
860
+ if self.built:
861
+ return
862
+ self.built = True
863
+ if getattr(self, "encoder", None) is not None:
864
+ with tf.name_scope(self.encoder.name):
865
+ self.encoder.build(None)
866
+ if getattr(self, "pooler", None) is not None:
867
+ with tf.name_scope(self.pooler.name):
868
+ self.pooler.build(None)
869
+ if getattr(self, "embeddings", None) is not None:
870
+ with tf.name_scope(self.embeddings.name):
871
+ self.embeddings.build(None)
872
+
873
+
874
+ class TFRobertaPreTrainedModel(TFPreTrainedModel):
875
+ """
876
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
877
+ models.
878
+ """
879
+
880
+ config_class = RobertaConfig
881
+ base_model_prefix = "roberta"
882
+
883
+
884
+ ROBERTA_START_DOCSTRING = r"""
885
+
886
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
887
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
888
+ etc.)
889
+
890
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
891
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
892
+ behavior.
893
+
894
+ <Tip>
895
+
896
+ TensorFlow models and layers in `transformers` accept two formats as input:
897
+
898
+ - having all inputs as keyword arguments (like PyTorch models), or
899
+ - having all inputs as a list, tuple or dict in the first positional argument.
900
+
901
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
902
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
903
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
904
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
905
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
906
+ positional argument:
907
+
908
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
909
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
910
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
911
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
912
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
913
+
914
+ Note that when creating models and layers with
915
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
916
+ about any of this, as you can just pass inputs like you would to any other Python function!
917
+
918
+ </Tip>
919
+
920
+ Parameters:
921
+ config ([`RobertaConfig`]): Model configuration class with all the parameters of the
922
+ model. Initializing with a config file does not load the weights associated with the model, only the
923
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
924
+ """
925
+
926
+ ROBERTA_INPUTS_DOCSTRING = r"""
927
+ Args:
928
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
929
+ Indices of input sequence tokens in the vocabulary.
930
+
931
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
932
+ [`PreTrainedTokenizer.encode`] for details.
933
+
934
+ [What are input IDs?](../glossary#input-ids)
935
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
936
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
937
+
938
+ - 1 for tokens that are **not masked**,
939
+ - 0 for tokens that are **masked**.
940
+
941
+ [What are attention masks?](../glossary#attention-mask)
942
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
943
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
944
+ 1]`:
945
+
946
+ - 0 corresponds to a *sentence A* token,
947
+ - 1 corresponds to a *sentence B* token.
948
+
949
+ [What are token type IDs?](../glossary#token-type-ids)
950
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
951
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
952
+ config.max_position_embeddings - 1]`.
953
+
954
+ [What are position IDs?](../glossary#position-ids)
955
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
956
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
957
+
958
+ - 1 indicates the head is **not masked**,
959
+ - 0 indicates the head is **masked**.
960
+
961
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
962
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
963
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
964
+ model's internal embedding lookup matrix.
965
+ output_attentions (`bool`, *optional*):
966
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
967
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
968
+ config will be used instead.
969
+ output_hidden_states (`bool`, *optional*):
970
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
971
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
972
+ used instead.
973
+ return_dict (`bool`, *optional*):
974
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
975
+ eager mode, in graph mode the value will always be set to True.
976
+ training (`bool`, *optional*, defaults to `False`):
977
+ Whether or not to use the model in training mode (some modules like dropout modules have different
978
+ behaviors between training and evaluation).
979
+ """
980
+
981
+
982
+ @add_start_docstrings(
983
+ "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
984
+ ROBERTA_START_DOCSTRING,
985
+ )
986
+ class TFRobertaModel(TFRobertaPreTrainedModel):
987
+ def __init__(self, config, *inputs, **kwargs):
988
+ super().__init__(config, *inputs, **kwargs)
989
+ self.roberta = TFRobertaMainLayer(config, name="roberta")
990
+
991
+ @unpack_inputs
992
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
993
+ @add_code_sample_docstrings(
994
+ checkpoint=_CHECKPOINT_FOR_DOC,
995
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
996
+ config_class=_CONFIG_FOR_DOC,
997
+ )
998
+ def call(
999
+ self,
1000
+ input_ids: TFModelInputType | None = None,
1001
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1002
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1003
+ position_ids: np.ndarray | tf.Tensor | None = None,
1004
+ head_mask: np.ndarray | tf.Tensor | None = None,
1005
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1006
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1007
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1008
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1009
+ use_cache: Optional[bool] = None,
1010
+ output_attentions: Optional[bool] = None,
1011
+ output_hidden_states: Optional[bool] = None,
1012
+ return_dict: Optional[bool] = None,
1013
+ training: Optional[bool] = False,
1014
+ ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]:
1015
+ r"""
1016
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1017
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1018
+ the model is configured as a decoder.
1019
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1020
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1021
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1022
+
1023
+ - 1 for tokens that are **not masked**,
1024
+ - 0 for tokens that are **masked**.
1025
+
1026
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1027
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1028
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1029
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1030
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1031
+ use_cache (`bool`, *optional*, defaults to `True`):
1032
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1033
+ `past_key_values`). Set to `False` during training, `True` during generation
1034
+ """
1035
+ outputs = self.roberta(
1036
+ input_ids=input_ids,
1037
+ attention_mask=attention_mask,
1038
+ token_type_ids=token_type_ids,
1039
+ position_ids=position_ids,
1040
+ head_mask=head_mask,
1041
+ inputs_embeds=inputs_embeds,
1042
+ encoder_hidden_states=encoder_hidden_states,
1043
+ encoder_attention_mask=encoder_attention_mask,
1044
+ past_key_values=past_key_values,
1045
+ use_cache=use_cache,
1046
+ output_attentions=output_attentions,
1047
+ output_hidden_states=output_hidden_states,
1048
+ return_dict=return_dict,
1049
+ training=training,
1050
+ )
1051
+
1052
+ return outputs
1053
+
1054
+ def build(self, input_shape=None):
1055
+ if self.built:
1056
+ return
1057
+ self.built = True
1058
+ if getattr(self, "roberta", None) is not None:
1059
+ with tf.name_scope(self.roberta.name):
1060
+ self.roberta.build(None)
1061
+
1062
+
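A minimal usage sketch for `TFRobertaModel`, assuming the public `AutoTokenizer`/`from_pretrained` API and the `FacebookAI/roberta-base` checkpoint referenced elsewhere in this file:

```python
from transformers import AutoTokenizer, TFRobertaModel

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
model = TFRobertaModel.from_pretrained("FacebookAI/roberta-base")

# encode a sentence and run a forward pass through the main layer
inputs = tokenizer("Hello world", return_tensors="tf")
outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)
```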
1063
+ class TFRobertaLMHead(keras.layers.Layer):
1064
+ """Roberta Head for masked language modeling."""
1065
+
1066
+ def __init__(self, config, input_embeddings, **kwargs):
1067
+ super().__init__(**kwargs)
1068
+
1069
+ self.config = config
1070
+ self.hidden_size = config.hidden_size
1071
+ self.dense = keras.layers.Dense(
1072
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
1073
+ )
1074
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1075
+ self.act = get_tf_activation("gelu")
1076
+
1077
+ # The output weights are the same as the input embeddings, but there is
1078
+ # an output-only bias for each token.
1079
+ self.decoder = input_embeddings
1080
+
1081
+ def build(self, input_shape=None):
1082
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1083
+
1084
+ if self.built:
1085
+ return
1086
+ self.built = True
1087
+ if getattr(self, "dense", None) is not None:
1088
+ with tf.name_scope(self.dense.name):
1089
+ self.dense.build([None, None, self.config.hidden_size])
1090
+ if getattr(self, "layer_norm", None) is not None:
1091
+ with tf.name_scope(self.layer_norm.name):
1092
+ self.layer_norm.build([None, None, self.config.hidden_size])
1093
+
1094
+ def get_output_embeddings(self):
1095
+ return self.decoder
1096
+
1097
+ def set_output_embeddings(self, value):
1098
+ self.decoder.weight = value
1099
+ self.decoder.vocab_size = shape_list(value)[0]
1100
+
1101
+ def get_bias(self):
1102
+ return {"bias": self.bias}
1103
+
1104
+ def set_bias(self, value):
1105
+ self.bias = value["bias"]
1106
+ self.config.vocab_size = shape_list(value["bias"])[0]
1107
+
1108
+ def call(self, hidden_states):
1109
+ hidden_states = self.dense(hidden_states)
1110
+ hidden_states = self.act(hidden_states)
1111
+ hidden_states = self.layer_norm(hidden_states)
1112
+
1113
+ # project back to size of vocabulary with bias
1114
+ seq_length = shape_list(tensor=hidden_states)[1]
1115
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1116
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
1117
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1118
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1119
+
1120
+ return hidden_states
1121
+
1122
+
1123
+ @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING)
1124
+ class TFRobertaForMaskedLM(TFRobertaPreTrainedModel, TFMaskedLanguageModelingLoss):
1125
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1126
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
1127
+
1128
+ def __init__(self, config, *inputs, **kwargs):
1129
+ super().__init__(config, *inputs, **kwargs)
1130
+
1131
+ self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
1132
+ self.lm_head = TFRobertaLMHead(config, self.roberta.embeddings, name="lm_head")
1133
+
1134
+ def get_lm_head(self):
1135
+ return self.lm_head
1136
+
1137
+ def get_prefix_bias_name(self):
1138
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1139
+ return self.name + "/" + self.lm_head.name
1140
+
1141
+ @unpack_inputs
1142
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1143
+ @add_code_sample_docstrings(
1144
+ checkpoint=_CHECKPOINT_FOR_DOC,
1145
+ output_type=TFMaskedLMOutput,
1146
+ config_class=_CONFIG_FOR_DOC,
1147
+ mask="<mask>",
1148
+ expected_output="' Paris'",
1149
+ expected_loss=0.1,
1150
+ )
1151
+ def call(
1152
+ self,
1153
+ input_ids: TFModelInputType | None = None,
1154
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1155
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1156
+ position_ids: np.ndarray | tf.Tensor | None = None,
1157
+ head_mask: np.ndarray | tf.Tensor | None = None,
1158
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1159
+ output_attentions: Optional[bool] = None,
1160
+ output_hidden_states: Optional[bool] = None,
1161
+ return_dict: Optional[bool] = None,
1162
+ labels: np.ndarray | tf.Tensor | None = None,
1163
+ training: Optional[bool] = False,
1164
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1165
+ r"""
1166
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1167
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1168
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1169
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1170
+ """
1171
+ outputs = self.roberta(
1172
+ input_ids,
1173
+ attention_mask=attention_mask,
1174
+ token_type_ids=token_type_ids,
1175
+ position_ids=position_ids,
1176
+ head_mask=head_mask,
1177
+ inputs_embeds=inputs_embeds,
1178
+ output_attentions=output_attentions,
1179
+ output_hidden_states=output_hidden_states,
1180
+ return_dict=return_dict,
1181
+ training=training,
1182
+ )
1183
+
1184
+ sequence_output = outputs[0]
1185
+ prediction_scores = self.lm_head(sequence_output)
1186
+
1187
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1188
+
1189
+ if not return_dict:
1190
+ output = (prediction_scores,) + outputs[2:]
1191
+ return ((loss,) + output) if loss is not None else output
1192
+
1193
+ return TFMaskedLMOutput(
1194
+ loss=loss,
1195
+ logits=prediction_scores,
1196
+ hidden_states=outputs.hidden_states,
1197
+ attentions=outputs.attentions,
1198
+ )
1199
+
1200
+ def build(self, input_shape=None):
1201
+ if self.built:
1202
+ return
1203
+ self.built = True
1204
+ if getattr(self, "roberta", None) is not None:
1205
+ with tf.name_scope(self.roberta.name):
1206
+ self.roberta.build(None)
1207
+ if getattr(self, "lm_head", None) is not None:
1208
+ with tf.name_scope(self.lm_head.name):
1209
+ self.lm_head.build(None)
1210
+
1211
+
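A minimal fill-mask sketch for `TFRobertaForMaskedLM`, assuming the `FacebookAI/roberta-base` checkpoint; the example sentence is illustrative, and the expected completion matches the `expected_output` in the code-sample decorator above:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
model = TFRobertaForMaskedLM.from_pretrained("FacebookAI/roberta-base")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="tf")
logits = model(**inputs).logits

# locate the <mask> position and take the highest-scoring token
mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_index]))
print(tokenizer.decode([predicted_id]))  # " Paris" for this checkpoint
```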
1212
+ class TFRobertaForCausalLM(TFRobertaPreTrainedModel, TFCausalLanguageModelingLoss):
1213
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1214
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
1215
+
1216
+ def __init__(self, config: RobertaConfig, *inputs, **kwargs):
1217
+ super().__init__(config, *inputs, **kwargs)
1218
+
1219
+ if not config.is_decoder:
1220
+ logger.warning("If you want to use `TFRobertaForCausalLM` as a standalone, add `is_decoder=True`.")
1221
+
1222
+ self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
1223
+ self.lm_head = TFRobertaLMHead(config, input_embeddings=self.roberta.embeddings, name="lm_head")
1224
+
1225
+ def get_lm_head(self):
1226
+ return self.lm_head
1227
+
1228
+ def get_prefix_bias_name(self):
1229
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1230
+ return self.name + "/" + self.lm_head.name
1231
+
1232
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation
1233
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1234
+ input_shape = input_ids.shape
1235
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1236
+ if attention_mask is None:
1237
+ attention_mask = tf.ones(input_shape)
1238
+
1239
+ # cut decoder_input_ids if past is used
1240
+ if past_key_values is not None:
1241
+ input_ids = input_ids[:, -1:]
1242
+
1243
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1244
+
1245
+ @unpack_inputs
1246
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1247
+ @add_code_sample_docstrings(
1248
+ checkpoint=_CHECKPOINT_FOR_DOC,
1249
+ output_type=TFCausalLMOutputWithCrossAttentions,
1250
+ config_class=_CONFIG_FOR_DOC,
1251
+ )
1252
+ def call(
1253
+ self,
1254
+ input_ids: TFModelInputType | None = None,
1255
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1256
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1257
+ position_ids: np.ndarray | tf.Tensor | None = None,
1258
+ head_mask: np.ndarray | tf.Tensor | None = None,
1259
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1260
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1261
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1262
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1263
+ use_cache: Optional[bool] = None,
1264
+ output_attentions: Optional[bool] = None,
1265
+ output_hidden_states: Optional[bool] = None,
1266
+ return_dict: Optional[bool] = None,
1267
+ labels: np.ndarray | tf.Tensor | None = None,
1268
+ training: Optional[bool] = False,
1269
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
1270
+ r"""
1271
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1272
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1273
+ the model is configured as a decoder.
1274
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1275
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1276
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1277
+
1278
+ - 1 for tokens that are **not masked**,
1279
+ - 0 for tokens that are **masked**.
1280
+
1281
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1282
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1283
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1284
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1285
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1286
+ use_cache (`bool`, *optional*, defaults to `True`):
1287
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1288
+ `past_key_values`). Set to `False` during training, `True` during generation
1289
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1290
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1291
+ config.vocab_size - 1]`.
1292
+ """
1293
+ outputs = self.roberta(
1294
+ input_ids=input_ids,
1295
+ attention_mask=attention_mask,
1296
+ token_type_ids=token_type_ids,
1297
+ position_ids=position_ids,
1298
+ head_mask=head_mask,
1299
+ inputs_embeds=inputs_embeds,
1300
+ encoder_hidden_states=encoder_hidden_states,
1301
+ encoder_attention_mask=encoder_attention_mask,
1302
+ past_key_values=past_key_values,
1303
+ use_cache=use_cache,
1304
+ output_attentions=output_attentions,
1305
+ output_hidden_states=output_hidden_states,
1306
+ return_dict=return_dict,
1307
+ training=training,
1308
+ )
1309
+
1310
+ sequence_output = outputs[0]
1311
+ logits = self.lm_head(hidden_states=sequence_output, training=training)
1312
+ loss = None
1313
+
1314
+ if labels is not None:
1315
+ # shift labels to the left and cut last logit token
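+ # i.e. the logit at position t is scored against the token at position t + 1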
1316
+ shifted_logits = logits[:, :-1]
1317
+ labels = labels[:, 1:]
1318
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
1319
+
1320
+ if not return_dict:
1321
+ output = (logits,) + outputs[2:]
1322
+ return ((loss,) + output) if loss is not None else output
1323
+
1324
+ return TFCausalLMOutputWithCrossAttentions(
1325
+ loss=loss,
1326
+ logits=logits,
1327
+ past_key_values=outputs.past_key_values,
1328
+ hidden_states=outputs.hidden_states,
1329
+ attentions=outputs.attentions,
1330
+ cross_attentions=outputs.cross_attentions,
1331
+ )
1332
+
1333
+ def build(self, input_shape=None):
1334
+ if self.built:
1335
+ return
1336
+ self.built = True
1337
+ if getattr(self, "roberta", None) is not None:
1338
+ with tf.name_scope(self.roberta.name):
1339
+ self.roberta.build(None)
1340
+ if getattr(self, "lm_head", None) is not None:
1341
+ with tf.name_scope(self.lm_head.name):
1342
+ self.lm_head.build(None)
1343
+
1344
+
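`TFRobertaForCausalLM` is mostly used as a decoder inside encoder-decoder setups; as the constructor warning notes, standalone use needs `is_decoder=True`. A minimal sketch, assuming the `FacebookAI/roberta-base` checkpoint:

```python
from transformers import AutoConfig, AutoTokenizer, TFRobertaForCausalLM

config = AutoConfig.from_pretrained("FacebookAI/roberta-base", is_decoder=True)
tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
model = TFRobertaForCausalLM.from_pretrained("FacebookAI/roberta-base", config=config)

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
# passing labels triggers the shifted next-token loss computed in `call`
outputs = model(**inputs, labels=inputs["input_ids"])

print(outputs.loss, outputs.logits.shape)
```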
1345
+ class TFRobertaClassificationHead(keras.layers.Layer):
1346
+ """Head for sentence-level classification tasks."""
1347
+
1348
+ def __init__(self, config, **kwargs):
1349
+ super().__init__(**kwargs)
1350
+ self.dense = keras.layers.Dense(
1351
+ config.hidden_size,
1352
+ kernel_initializer=get_initializer(config.initializer_range),
1353
+ activation="tanh",
1354
+ name="dense",
1355
+ )
1356
+ classifier_dropout = (
1357
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1358
+ )
1359
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1360
+ self.out_proj = keras.layers.Dense(
1361
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
1362
+ )
1363
+ self.config = config
1364
+
1365
+ def call(self, features, training=False):
1366
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1367
+ x = self.dropout(x, training=training)
1368
+ x = self.dense(x)
1369
+ x = self.dropout(x, training=training)
1370
+ x = self.out_proj(x)
1371
+ return x
1372
+
1373
+ def build(self, input_shape=None):
1374
+ if self.built:
1375
+ return
1376
+ self.built = True
1377
+ if getattr(self, "dense", None) is not None:
1378
+ with tf.name_scope(self.dense.name):
1379
+ self.dense.build([None, None, self.config.hidden_size])
1380
+ if getattr(self, "out_proj", None) is not None:
1381
+ with tf.name_scope(self.out_proj.name):
1382
+ self.out_proj.build([None, None, self.config.hidden_size])
1383
+
1384
+
1385
+ @add_start_docstrings(
1386
+ """
1387
+ RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1388
+ pooled output) e.g. for GLUE tasks.
1389
+ """,
1390
+ ROBERTA_START_DOCSTRING,
1391
+ )
1392
+ class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel, TFSequenceClassificationLoss):
1393
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1394
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1395
+
1396
+ def __init__(self, config, *inputs, **kwargs):
1397
+ super().__init__(config, *inputs, **kwargs)
1398
+ self.num_labels = config.num_labels
1399
+
1400
+ self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
1401
+ self.classifier = TFRobertaClassificationHead(config, name="classifier")
1402
+
1403
+ @unpack_inputs
1404
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1405
+ @add_code_sample_docstrings(
1406
+ checkpoint="cardiffnlp/twitter-roberta-base-emotion",
1407
+ output_type=TFSequenceClassifierOutput,
1408
+ config_class=_CONFIG_FOR_DOC,
1409
+ expected_output="'optimism'",
1410
+ expected_loss=0.08,
1411
+ )
1412
+ def call(
1413
+ self,
1414
+ input_ids: TFModelInputType | None = None,
1415
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1416
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1417
+ position_ids: np.ndarray | tf.Tensor | None = None,
1418
+ head_mask: np.ndarray | tf.Tensor | None = None,
1419
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1420
+ output_attentions: Optional[bool] = None,
1421
+ output_hidden_states: Optional[bool] = None,
1422
+ return_dict: Optional[bool] = None,
1423
+ labels: np.ndarray | tf.Tensor | None = None,
1424
+ training: Optional[bool] = False,
1425
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1426
+ r"""
1427
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1428
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1429
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1430
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1431
+ """
1432
+ outputs = self.roberta(
1433
+ input_ids,
1434
+ attention_mask=attention_mask,
1435
+ token_type_ids=token_type_ids,
1436
+ position_ids=position_ids,
1437
+ head_mask=head_mask,
1438
+ inputs_embeds=inputs_embeds,
1439
+ output_attentions=output_attentions,
1440
+ output_hidden_states=output_hidden_states,
1441
+ return_dict=return_dict,
1442
+ training=training,
1443
+ )
1444
+ sequence_output = outputs[0]
1445
+ logits = self.classifier(sequence_output, training=training)
1446
+
1447
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1448
+
1449
+ if not return_dict:
1450
+ output = (logits,) + outputs[2:]
1451
+ return ((loss,) + output) if loss is not None else output
1452
+
1453
+ return TFSequenceClassifierOutput(
1454
+ loss=loss,
1455
+ logits=logits,
1456
+ hidden_states=outputs.hidden_states,
1457
+ attentions=outputs.attentions,
1458
+ )
1459
+
1460
+ def build(self, input_shape=None):
1461
+ if self.built:
1462
+ return
1463
+ self.built = True
1464
+ if getattr(self, "roberta", None) is not None:
1465
+ with tf.name_scope(self.roberta.name):
1466
+ self.roberta.build(None)
1467
+ if getattr(self, "classifier", None) is not None:
1468
+ with tf.name_scope(self.classifier.name):
1469
+ self.classifier.build(None)
1470
+
1471
+
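A minimal sketch for `TFRobertaForSequenceClassification`, assuming the `cardiffnlp/twitter-roberta-base-emotion` checkpoint used in the code-sample decorator above:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-emotion")
model = TFRobertaForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-emotion")

inputs = tokenizer("What a great day!", return_tensors="tf")
logits = model(**inputs).logits

predicted_class = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])  # an emotion label such as "optimism"
```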
1472
+ @add_start_docstrings(
1473
+ """
1474
+ Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1475
+ softmax) e.g. for RocStories/SWAG tasks.
1476
+ """,
1477
+ ROBERTA_START_DOCSTRING,
1478
+ )
1479
+ class TFRobertaForMultipleChoice(TFRobertaPreTrainedModel, TFMultipleChoiceLoss):
1480
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1481
+ _keys_to_ignore_on_load_unexpected = [r"lm_head"]
1482
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1483
+
1484
+ def __init__(self, config, *inputs, **kwargs):
1485
+ super().__init__(config, *inputs, **kwargs)
1486
+
1487
+ self.roberta = TFRobertaMainLayer(config, name="roberta")
1488
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1489
+ self.classifier = keras.layers.Dense(
1490
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1491
+ )
1492
+ self.config = config
1493
+
1494
+ @unpack_inputs
1495
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1496
+ @add_code_sample_docstrings(
1497
+ checkpoint=_CHECKPOINT_FOR_DOC,
1498
+ output_type=TFMultipleChoiceModelOutput,
1499
+ config_class=_CONFIG_FOR_DOC,
1500
+ )
1501
+ def call(
1502
+ self,
1503
+ input_ids: TFModelInputType | None = None,
1504
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1505
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1506
+ position_ids: np.ndarray | tf.Tensor | None = None,
1507
+ head_mask: np.ndarray | tf.Tensor | None = None,
1508
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1509
+ output_attentions: Optional[bool] = None,
1510
+ output_hidden_states: Optional[bool] = None,
1511
+ return_dict: Optional[bool] = None,
1512
+ labels: np.ndarray | tf.Tensor | None = None,
1513
+ training: Optional[bool] = False,
1514
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1515
+ r"""
1516
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1517
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
1518
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1519
+ """
1520
+
1521
+ if input_ids is not None:
1522
+ num_choices = shape_list(input_ids)[1]
1523
+ seq_length = shape_list(input_ids)[2]
1524
+ else:
1525
+ num_choices = shape_list(inputs_embeds)[1]
1526
+ seq_length = shape_list(inputs_embeds)[2]
1527
+
1528
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1529
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1530
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1531
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1532
+ outputs = self.roberta(
1533
+ flat_input_ids,
1534
+ flat_attention_mask,
1535
+ flat_token_type_ids,
1536
+ flat_position_ids,
1537
+ head_mask,
1538
+ inputs_embeds,
1539
+ output_attentions,
1540
+ output_hidden_states,
1541
+ return_dict=return_dict,
1542
+ training=training,
1543
+ )
1544
+ pooled_output = outputs[1]
1545
+ pooled_output = self.dropout(pooled_output, training=training)
1546
+ logits = self.classifier(pooled_output)
1547
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1548
+
1549
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1550
+
1551
+ if not return_dict:
1552
+ output = (reshaped_logits,) + outputs[2:]
1553
+ return ((loss,) + output) if loss is not None else output
1554
+
1555
+ return TFMultipleChoiceModelOutput(
1556
+ loss=loss,
1557
+ logits=reshaped_logits,
1558
+ hidden_states=outputs.hidden_states,
1559
+ attentions=outputs.attentions,
1560
+ )
1561
+
1562
+ def build(self, input_shape=None):
1563
+ if self.built:
1564
+ return
1565
+ self.built = True
1566
+ if getattr(self, "roberta", None) is not None:
1567
+ with tf.name_scope(self.roberta.name):
1568
+ self.roberta.build(None)
1569
+ if getattr(self, "classifier", None) is not None:
1570
+ with tf.name_scope(self.classifier.name):
1571
+ self.classifier.build([None, None, self.config.hidden_size])
1572
+
1573
+
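The flattening in `call` above expects inputs of shape `(batch_size, num_choices, sequence_length)`. A minimal sketch of building such inputs, assuming the `FacebookAI/roberta-base` checkpoint and an illustrative prompt/choice pair:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
model = TFRobertaForMultipleChoice.from_pretrained("FacebookAI/roberta-base")

prompt = "The weather today is"
choices = ["sunny and warm.", "a kind of fruit."]

# one (prompt, choice) pair per candidate, padded to a common length
encoding = tokenizer([prompt, prompt], choices, padding=True, return_tensors="tf")
# add the batch dimension: each tensor becomes (1, num_choices, sequence_length)
inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}

logits = model(**inputs).logits  # shape (1, num_choices)
print(int(tf.argmax(logits, axis=-1)[0]))  # index of the most plausible choice
```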
1574
+ @add_start_docstrings(
1575
+ """
1576
+ RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1577
+ Named-Entity-Recognition (NER) tasks.
1578
+ """,
1579
+ ROBERTA_START_DOCSTRING,
1580
+ )
1581
+ class TFRobertaForTokenClassification(TFRobertaPreTrainedModel, TFTokenClassificationLoss):
1582
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1583
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1584
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1585
+
1586
+ def __init__(self, config, *inputs, **kwargs):
1587
+ super().__init__(config, *inputs, **kwargs)
1588
+ self.num_labels = config.num_labels
1589
+
1590
+ self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
1591
+ classifier_dropout = (
1592
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1593
+ )
1594
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1595
+ self.classifier = keras.layers.Dense(
1596
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1597
+ )
1598
+ self.config = config
1599
+
1600
+ @unpack_inputs
1601
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1602
+ @add_code_sample_docstrings(
1603
+ checkpoint="ydshieh/roberta-large-ner-english",
1604
+ output_type=TFTokenClassifierOutput,
1605
+ config_class=_CONFIG_FOR_DOC,
1606
+ expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']",
1607
+ expected_loss=0.01,
1608
+ )
1609
+ def call(
1610
+ self,
1611
+ input_ids: TFModelInputType | None = None,
1612
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1613
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1614
+ position_ids: np.ndarray | tf.Tensor | None = None,
1615
+ head_mask: np.ndarray | tf.Tensor | None = None,
1616
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1617
+ output_attentions: Optional[bool] = None,
1618
+ output_hidden_states: Optional[bool] = None,
1619
+ return_dict: Optional[bool] = None,
1620
+ labels: np.ndarray | tf.Tensor | None = None,
1621
+ training: Optional[bool] = False,
1622
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1623
+ r"""
1624
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1625
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1626
+ """
1627
+ outputs = self.roberta(
1628
+ input_ids,
1629
+ attention_mask=attention_mask,
1630
+ token_type_ids=token_type_ids,
1631
+ position_ids=position_ids,
1632
+ head_mask=head_mask,
1633
+ inputs_embeds=inputs_embeds,
1634
+ output_attentions=output_attentions,
1635
+ output_hidden_states=output_hidden_states,
1636
+ return_dict=return_dict,
1637
+ training=training,
1638
+ )
1639
+ sequence_output = outputs[0]
1640
+
1641
+ sequence_output = self.dropout(sequence_output, training=training)
1642
+ logits = self.classifier(sequence_output)
1643
+
1644
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1645
+
1646
+ if not return_dict:
1647
+ output = (logits,) + outputs[2:]
1648
+ return ((loss,) + output) if loss is not None else output
1649
+
1650
+ return TFTokenClassifierOutput(
1651
+ loss=loss,
1652
+ logits=logits,
1653
+ hidden_states=outputs.hidden_states,
1654
+ attentions=outputs.attentions,
1655
+ )
1656
+
1657
+ def build(self, input_shape=None):
1658
+ if self.built:
1659
+ return
1660
+ self.built = True
1661
+ if getattr(self, "roberta", None) is not None:
1662
+ with tf.name_scope(self.roberta.name):
1663
+ self.roberta.build(None)
1664
+ if getattr(self, "classifier", None) is not None:
1665
+ with tf.name_scope(self.classifier.name):
1666
+ self.classifier.build([None, None, self.config.hidden_size])
1667
+
1668
+
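A minimal token-classification sketch, assuming the `ydshieh/roberta-large-ner-english` checkpoint referenced in the code-sample decorator above:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("ydshieh/roberta-large-ner-english")
model = TFRobertaForTokenClassification.from_pretrained("ydshieh/roberta-large-ner-english")

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
logits = model(**inputs).logits  # (1, sequence_length, num_labels)

predicted_ids = tf.argmax(logits, axis=-1)[0].numpy()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].numpy().tolist())
for token, label_id in zip(tokens, predicted_ids):
    print(token, model.config.id2label[int(label_id)])
```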
1669
+ @add_start_docstrings(
1670
+ """
1671
+ RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1672
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1673
+ """,
1674
+ ROBERTA_START_DOCSTRING,
1675
+ )
1676
+ class TFRobertaForQuestionAnswering(TFRobertaPreTrainedModel, TFQuestionAnsweringLoss):
1677
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1678
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1679
+
1680
+ def __init__(self, config, *inputs, **kwargs):
1681
+ super().__init__(config, *inputs, **kwargs)
1682
+ self.num_labels = config.num_labels
1683
+
1684
+ self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
1685
+ self.qa_outputs = keras.layers.Dense(
1686
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1687
+ )
1688
+ self.config = config
1689
+
1690
+ @unpack_inputs
1691
+ @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1692
+ @add_code_sample_docstrings(
1693
+ checkpoint="ydshieh/roberta-base-squad2",
1694
+ output_type=TFQuestionAnsweringModelOutput,
1695
+ config_class=_CONFIG_FOR_DOC,
1696
+ expected_output="' puppet'",
1697
+ expected_loss=0.86,
1698
+ )
1699
+ def call(
1700
+ self,
1701
+ input_ids: TFModelInputType | None = None,
1702
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1703
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1704
+ position_ids: np.ndarray | tf.Tensor | None = None,
1705
+ head_mask: np.ndarray | tf.Tensor | None = None,
1706
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1707
+ output_attentions: Optional[bool] = None,
1708
+ output_hidden_states: Optional[bool] = None,
1709
+ return_dict: Optional[bool] = None,
1710
+ start_positions: np.ndarray | tf.Tensor | None = None,
1711
+ end_positions: np.ndarray | tf.Tensor | None = None,
1712
+ training: Optional[bool] = False,
1713
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1714
+ r"""
1715
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1716
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1717
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1718
+ are not taken into account for computing the loss.
1719
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1720
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1721
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1722
+ are not taken into account for computing the loss.
1723
+ """
1724
+ outputs = self.roberta(
1725
+ input_ids,
1726
+ attention_mask=attention_mask,
1727
+ token_type_ids=token_type_ids,
1728
+ position_ids=position_ids,
1729
+ head_mask=head_mask,
1730
+ inputs_embeds=inputs_embeds,
1731
+ output_attentions=output_attentions,
1732
+ output_hidden_states=output_hidden_states,
1733
+ return_dict=return_dict,
1734
+ training=training,
1735
+ )
1736
+ sequence_output = outputs[0]
1737
+
1738
+ logits = self.qa_outputs(sequence_output)
1739
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1740
+ start_logits = tf.squeeze(start_logits, axis=-1)
1741
+ end_logits = tf.squeeze(end_logits, axis=-1)
1742
+
1743
+ loss = None
1744
+ if start_positions is not None and end_positions is not None:
1745
+ labels = {"start_position": start_positions}
1746
+ labels["end_position"] = end_positions
1747
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1748
+
1749
+ if not return_dict:
1750
+ output = (start_logits, end_logits) + outputs[2:]
1751
+ return ((loss,) + output) if loss is not None else output
1752
+
1753
+ return TFQuestionAnsweringModelOutput(
1754
+ loss=loss,
1755
+ start_logits=start_logits,
1756
+ end_logits=end_logits,
1757
+ hidden_states=outputs.hidden_states,
1758
+ attentions=outputs.attentions,
1759
+ )
1760
+
1761
+ def build(self, input_shape=None):
1762
+ if self.built:
1763
+ return
1764
+ self.built = True
1765
+ if getattr(self, "roberta", None) is not None:
1766
+ with tf.name_scope(self.roberta.name):
1767
+ self.roberta.build(None)
1768
+ if getattr(self, "qa_outputs", None) is not None:
1769
+ with tf.name_scope(self.qa_outputs.name):
1770
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1771
+
1772
+
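A minimal sketch of decoding an answer span from the start/end logits, assuming the `ydshieh/roberta-base-squad2` checkpoint referenced in the code-sample decorator above:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("ydshieh/roberta-base-squad2")
model = TFRobertaForQuestionAnswering.from_pretrained("ydshieh/roberta-base-squad2")

question = "Who was Jim Henson?"
context = "Jim Henson was a nice puppet"
inputs = tokenizer(question, context, return_tensors="tf")

outputs = model(**inputs)
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])

answer_ids = inputs["input_ids"][0, start : end + 1]
print(tokenizer.decode(answer_ids.numpy()))  # expected to contain "puppet"
```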
1773
+ __all__ = [
1774
+ "TFRobertaForCausalLM",
1775
+ "TFRobertaForMaskedLM",
1776
+ "TFRobertaForMultipleChoice",
1777
+ "TFRobertaForQuestionAnswering",
1778
+ "TFRobertaForSequenceClassification",
1779
+ "TFRobertaForTokenClassification",
1780
+ "TFRobertaMainLayer",
1781
+ "TFRobertaModel",
1782
+ "TFRobertaPreTrainedModel",
1783
+ ]
docs/transformers/build/lib/transformers/models/roberta/tokenization_roberta_fast.py ADDED
@@ -0,0 +1,264 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast Tokenization classes for RoBERTa."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils_base import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import logging
25
+ from .tokenization_roberta import RobertaTokenizer
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
31
+
32
+
33
+ class RobertaTokenizerFast(PreTrainedTokenizerFast):
34
+ """
35
+ Construct a "fast" RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
36
+ tokenizer, using byte-level Byte-Pair-Encoding.
37
+
38
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
39
+ be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
40
+
41
+ ```python
42
+ >>> from transformers import RobertaTokenizerFast
43
+
44
+ >>> tokenizer = RobertaTokenizerFast.from_pretrained("FacebookAI/roberta-base")
45
+ >>> tokenizer("Hello world")["input_ids"]
46
+ [0, 31414, 232, 2]
47
+
48
+ >>> tokenizer(" Hello world")["input_ids"]
49
+ [0, 20920, 232, 2]
50
+ ```
51
+
52
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
53
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
54
+
55
+ <Tip>
56
+
57
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
58
+
59
+ </Tip>
60
+
61
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
62
+ refer to this superclass for more information regarding those methods.
63
+
64
+ Args:
65
+ vocab_file (`str`):
66
+ Path to the vocabulary file.
67
+ merges_file (`str`):
68
+ Path to the merges file.
69
+ errors (`str`, *optional*, defaults to `"replace"`):
70
+ Paradigm to follow when decoding bytes to UTF-8. See
71
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
72
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
73
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
74
+
75
+ <Tip>
76
+
77
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
78
+ sequence. The token used is the `cls_token`.
79
+
80
+ </Tip>
81
+
82
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
83
+ The end of sequence token.
84
+
85
+ <Tip>
86
+
87
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
88
+ The token used is the `sep_token`.
89
+
90
+ </Tip>
91
+
92
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
93
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
94
+ sequence classification or for a text and a question for question answering. It is also used as the last
95
+ token of a sequence built with special tokens.
96
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
97
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
98
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
99
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
100
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
101
+ token instead.
102
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
103
+ The token used for padding, for example when batching sequences of different lengths.
104
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
105
+ The token used for masking values. This is the token used when training this model with masked language
106
+ modeling. This is the token which the model will try to predict.
107
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
108
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
109
+ other word. (The RoBERTa tokenizer detects the beginning of a word by the preceding space.)
110
+ trim_offsets (`bool`, *optional*, defaults to `True`):
111
+ Whether the post processing step should trim offsets to avoid including whitespaces.
112
+ """
113
+
114
+ vocab_files_names = VOCAB_FILES_NAMES
115
+ model_input_names = ["input_ids", "attention_mask"]
116
+ slow_tokenizer_class = RobertaTokenizer
117
+
118
+ def __init__(
119
+ self,
120
+ vocab_file=None,
121
+ merges_file=None,
122
+ tokenizer_file=None,
123
+ errors="replace",
124
+ bos_token="<s>",
125
+ eos_token="</s>",
126
+ sep_token="</s>",
127
+ cls_token="<s>",
128
+ unk_token="<unk>",
129
+ pad_token="<pad>",
130
+ mask_token="<mask>",
131
+ add_prefix_space=False,
132
+ trim_offsets=True,
133
+ **kwargs,
134
+ ):
135
+ mask_token = (
136
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
137
+ if isinstance(mask_token, str)
138
+ else mask_token
139
+ )
140
+ super().__init__(
141
+ vocab_file,
142
+ merges_file,
143
+ tokenizer_file=tokenizer_file,
144
+ errors=errors,
145
+ bos_token=bos_token,
146
+ eos_token=eos_token,
147
+ sep_token=sep_token,
148
+ cls_token=cls_token,
149
+ unk_token=unk_token,
150
+ pad_token=pad_token,
151
+ mask_token=mask_token,
152
+ add_prefix_space=add_prefix_space,
153
+ trim_offsets=trim_offsets,
154
+ **kwargs,
155
+ )
156
+
157
+ tokenizer_component = "post_processor"
158
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
159
+ if tokenizer_component_instance:
160
+ state = json.loads(tokenizer_component_instance.__getstate__())
161
+
162
+ # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
163
+ if "sep" in state:
164
+ state["sep"] = tuple(state["sep"])
165
+ if "cls" in state:
166
+ state["cls"] = tuple(state["cls"])
167
+
168
+ changes_to_apply = False
169
+
170
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
171
+ state["add_prefix_space"] = add_prefix_space
172
+ changes_to_apply = True
173
+
174
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
175
+ state["trim_offsets"] = trim_offsets
176
+ changes_to_apply = True
177
+
178
+ if changes_to_apply:
179
+ component_class = getattr(processors, state.pop("type"))
180
+ new_value = component_class(**state)
181
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
182
+
183
+ @property
184
+ def mask_token(self) -> str:
185
+ """
186
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
187
+ having been set.
188
+
189
+ The RoBERTa tokenizer has a special mask token so that it can be used in the fill-mask pipeline. The mask token
190
+ will greedily include the space before the *<mask>*.
191
+ """
192
+ if self._mask_token is None:
193
+ if self.verbose:
194
+ logger.error("Using mask_token, but it is not set yet.")
195
+ return None
196
+ return str(self._mask_token)
197
+
198
+ @mask_token.setter
199
+ def mask_token(self, value):
200
+ """
201
+ Overriding the default behavior of the mask token to have it eat the space before it.
202
+
203
+ This is needed to preserve backward compatibility with all the previously used models based on Roberta.
204
+ """
205
+ # The mask token behaves like a normal word, i.e. it includes the space before it
206
+ # So we set lstrip to True
207
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
208
+ self._mask_token = value
209
+
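+ # Illustrative note (not part of the upstream file): with `lstrip=True` the mask token absorbs the
+ # whitespace to its left, so in "Paris is the <mask> of France" the span " <mask>" is matched as a
+ # single mask token, which is the behaviour the fill-mask pipeline relies on.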
210
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
211
+ is_split_into_words = kwargs.get("is_split_into_words", False)
212
+ assert self.add_prefix_space or not is_split_into_words, (
213
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
214
+ "to use it with pretokenized inputs."
215
+ )
216
+
217
+ return super()._batch_encode_plus(*args, **kwargs)
218
+
219
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
220
+ is_split_into_words = kwargs.get("is_split_into_words", False)
221
+
222
+ assert self.add_prefix_space or not is_split_into_words, (
223
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
224
+ "to use it with pretokenized inputs."
225
+ )
226
+
227
+ return super()._encode_plus(*args, **kwargs)
228
+
229
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
230
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
231
+ return tuple(files)
232
+
233
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
234
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
235
+ if token_ids_1 is None:
236
+ return output
237
+
238
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
239
+
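+ # Illustrative note (not part of the upstream file): the resulting layouts are
+ #   single sequence:   <s> A </s>
+ #   pair of sequences: <s> A </s></s> B </s>
+ # where <s>/</s> correspond to `bos_token_id`/`eos_token_id` (also used as cls/sep tokens for RoBERTa).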
240
+ def create_token_type_ids_from_sequences(
241
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
242
+ ) -> List[int]:
243
+ """
244
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
245
+ make use of token type ids, therefore a list of zeros is returned.
246
+
247
+ Args:
248
+ token_ids_0 (`List[int]`):
249
+ List of IDs.
250
+ token_ids_1 (`List[int]`, *optional*):
251
+ Optional second list of IDs for sequence pairs.
252
+
253
+ Returns:
254
+ `List[int]`: List of zeros.
255
+ """
256
+ sep = [self.sep_token_id]
257
+ cls = [self.cls_token_id]
258
+
259
+ if token_ids_1 is None:
260
+ return len(cls + token_ids_0 + sep) * [0]
261
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
262
+
263
+
264
+ __all__ = ["RobertaTokenizerFast"]
docs/transformers/build/lib/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py ADDED
@@ -0,0 +1,157 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """RoBERTa-PreLayerNorm configuration"""
17
+
18
+ from collections import OrderedDict
19
+ from typing import Mapping
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ # Copied from transformers.models.roberta.configuration_roberta.RobertaConfig with FacebookAI/roberta-base->andreasmadsen/efficient_mlm_m0.40,RoBERTa->RoBERTa-PreLayerNorm,Roberta->RobertaPreLayerNorm,roberta->roberta-prelayernorm
30
+ class RobertaPreLayerNormConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`RobertaPreLayerNormModel`] or a [`TFRobertaPreLayerNormModel`]. It is
33
+ used to instantiate a RoBERTa-PreLayerNorm model according to the specified arguments, defining the model architecture.
34
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa-PreLayerNorm
35
+ [andreasmadsen/efficient_mlm_m0.40](https://huggingface.co/andreasmadsen/efficient_mlm_m0.40) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 50265):
43
+ Vocabulary size of the RoBERTa-PreLayerNorm model. Defines the number of different tokens that can be represented by the
44
+ `input_ids` passed when calling [`RobertaPreLayerNormModel`] or [`TFRobertaPreLayerNormModel`].
45
+ hidden_size (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ intermediate_size (`int`, *optional*, defaults to 3072):
52
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
53
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
56
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for the attention probabilities.
60
+ max_position_embeddings (`int`, *optional*, defaults to 512):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ just in case (e.g., 512 or 1024 or 2048).
63
+ type_vocab_size (`int`, *optional*, defaults to 2):
64
+ The vocabulary size of the `token_type_ids` passed when calling [`RobertaPreLayerNormModel`] or [`TFRobertaPreLayerNormModel`].
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
68
+ The epsilon used by the layer normalization layers.
69
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
70
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
71
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
72
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
73
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
74
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
75
+ is_decoder (`bool`, *optional*, defaults to `False`):
76
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
77
+ use_cache (`bool`, *optional*, defaults to `True`):
78
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
79
+ relevant if `config.is_decoder=True`.
80
+ classifier_dropout (`float`, *optional*):
81
+ The dropout ratio for the classification head.
82
+
83
+ Examples:
84
+
85
+ ```python
86
+ >>> from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel
87
+
88
+ >>> # Initializing a RoBERTa-PreLayerNorm configuration
89
+ >>> configuration = RobertaPreLayerNormConfig()
90
+
91
+ >>> # Initializing a model (with random weights) from the configuration
92
+ >>> model = RobertaPreLayerNormModel(configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "roberta-prelayernorm"
99
+
100
+ def __init__(
101
+ self,
102
+ vocab_size=50265,
103
+ hidden_size=768,
104
+ num_hidden_layers=12,
105
+ num_attention_heads=12,
106
+ intermediate_size=3072,
107
+ hidden_act="gelu",
108
+ hidden_dropout_prob=0.1,
109
+ attention_probs_dropout_prob=0.1,
110
+ max_position_embeddings=512,
111
+ type_vocab_size=2,
112
+ initializer_range=0.02,
113
+ layer_norm_eps=1e-12,
114
+ pad_token_id=1,
115
+ bos_token_id=0,
116
+ eos_token_id=2,
117
+ position_embedding_type="absolute",
118
+ use_cache=True,
119
+ classifier_dropout=None,
120
+ **kwargs,
121
+ ):
122
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
123
+
124
+ self.vocab_size = vocab_size
125
+ self.hidden_size = hidden_size
126
+ self.num_hidden_layers = num_hidden_layers
127
+ self.num_attention_heads = num_attention_heads
128
+ self.hidden_act = hidden_act
129
+ self.intermediate_size = intermediate_size
130
+ self.hidden_dropout_prob = hidden_dropout_prob
131
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
132
+ self.max_position_embeddings = max_position_embeddings
133
+ self.type_vocab_size = type_vocab_size
134
+ self.initializer_range = initializer_range
135
+ self.layer_norm_eps = layer_norm_eps
136
+ self.position_embedding_type = position_embedding_type
137
+ self.use_cache = use_cache
138
+ self.classifier_dropout = classifier_dropout
139
+
140
+
141
+ # Copied from transformers.models.roberta.configuration_roberta.RobertaOnnxConfig with Roberta->RobertaPreLayerNorm
142
+ class RobertaPreLayerNormOnnxConfig(OnnxConfig):
143
+ @property
144
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
145
+ if self.task == "multiple-choice":
146
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
147
+ else:
148
+ dynamic_axis = {0: "batch", 1: "sequence"}
149
+ return OrderedDict(
150
+ [
151
+ ("input_ids", dynamic_axis),
152
+ ("attention_mask", dynamic_axis),
153
+ ]
154
+ )
155
+
156
+
157
+ __all__ = ["RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig"]
docs/transformers/build/lib/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py ADDED
@@ -0,0 +1,1808 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """TF 2.0 RoBERTa-PreLayerNorm model."""
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ import warnings
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import numpy as np
25
+ import tensorflow as tf
26
+
27
+ from ...activations_tf import get_tf_activation
28
+ from ...modeling_tf_outputs import (
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
31
+ TFCausalLMOutputWithCrossAttentions,
32
+ TFMaskedLMOutput,
33
+ TFMultipleChoiceModelOutput,
34
+ TFQuestionAnsweringModelOutput,
35
+ TFSequenceClassifierOutput,
36
+ TFTokenClassifierOutput,
37
+ )
38
+ from ...modeling_tf_utils import (
39
+ TFCausalLanguageModelingLoss,
40
+ TFMaskedLanguageModelingLoss,
41
+ TFModelInputType,
42
+ TFMultipleChoiceLoss,
43
+ TFPreTrainedModel,
44
+ TFQuestionAnsweringLoss,
45
+ TFSequenceClassificationLoss,
46
+ TFTokenClassificationLoss,
47
+ get_initializer,
48
+ keras,
49
+ keras_serializable,
50
+ unpack_inputs,
51
+ )
52
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
53
+ from ...utils import (
54
+ add_code_sample_docstrings,
55
+ add_start_docstrings,
56
+ add_start_docstrings_to_model_forward,
57
+ logging,
58
+ )
59
+ from .configuration_roberta_prelayernorm import RobertaPreLayerNormConfig
60
+
61
+
62
+ logger = logging.get_logger(__name__)
63
+
64
+ _CHECKPOINT_FOR_DOC = "andreasmadsen/efficient_mlm_m0.40"
65
+ _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig"
66
+
67
+
68
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings with Roberta->RobertaPreLayerNorm
69
+ class TFRobertaPreLayerNormEmbeddings(keras.layers.Layer):
70
+ """
71
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
72
+ """
73
+
74
+ def __init__(self, config, **kwargs):
75
+ super().__init__(**kwargs)
76
+
77
+ self.padding_idx = 1
78
+ self.config = config
79
+ self.hidden_size = config.hidden_size
80
+ self.max_position_embeddings = config.max_position_embeddings
81
+ self.initializer_range = config.initializer_range
82
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
83
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
84
+
85
+ def build(self, input_shape=None):
86
+ with tf.name_scope("word_embeddings"):
87
+ self.weight = self.add_weight(
88
+ name="weight",
89
+ shape=[self.config.vocab_size, self.hidden_size],
90
+ initializer=get_initializer(self.initializer_range),
91
+ )
92
+
93
+ with tf.name_scope("token_type_embeddings"):
94
+ self.token_type_embeddings = self.add_weight(
95
+ name="embeddings",
96
+ shape=[self.config.type_vocab_size, self.hidden_size],
97
+ initializer=get_initializer(self.initializer_range),
98
+ )
99
+
100
+ with tf.name_scope("position_embeddings"):
101
+ self.position_embeddings = self.add_weight(
102
+ name="embeddings",
103
+ shape=[self.max_position_embeddings, self.hidden_size],
104
+ initializer=get_initializer(self.initializer_range),
105
+ )
106
+
107
+ if self.built:
108
+ return
109
+ self.built = True
110
+ if getattr(self, "LayerNorm", None) is not None:
111
+ with tf.name_scope(self.LayerNorm.name):
112
+ self.LayerNorm.build([None, None, self.config.hidden_size])
113
+
114
+ def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
115
+ """
116
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
117
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
118
+
119
+ Args:
120
+ input_ids: tf.Tensor
121
+ Returns: tf.Tensor
122
+ """
123
+ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
124
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
125
+
126
+ return incremental_indices + self.padding_idx
127
+
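+ # Illustrative example (not part of the upstream file), with padding_idx = 1:
+ #   input_ids           = [[  5, 12, 99,  1,  1]]
+ #   mask                = [[  1,  1,  1,  0,  0]]
+ #   cumsum(mask) * mask = [[  1,  2,  3,  0,  0]]
+ #   position_ids        = [[  2,  3,  4,  1,  1]]   (non-padding positions start at padding_idx + 1)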
128
+ def call(
129
+ self,
130
+ input_ids=None,
131
+ position_ids=None,
132
+ token_type_ids=None,
133
+ inputs_embeds=None,
134
+ past_key_values_length=0,
135
+ training=False,
136
+ ):
137
+ """
138
+ Applies embedding based on inputs tensor.
139
+
140
+ Returns:
141
+ final_embeddings (`tf.Tensor`): output embedding tensor.
142
+ """
143
+ assert not (input_ids is None and inputs_embeds is None)
144
+
145
+ if input_ids is not None:
146
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
147
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
148
+
149
+ input_shape = shape_list(inputs_embeds)[:-1]
150
+
151
+ if token_type_ids is None:
152
+ token_type_ids = tf.fill(dims=input_shape, value=0)
153
+
154
+ if position_ids is None:
155
+ if input_ids is not None:
156
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
157
+ position_ids = self.create_position_ids_from_input_ids(
158
+ input_ids=input_ids, past_key_values_length=past_key_values_length
159
+ )
160
+ else:
161
+ position_ids = tf.expand_dims(
162
+ tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
163
+ )
164
+
165
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
166
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
167
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
168
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
169
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
170
+
171
+ return final_embeddings
172
+
173
+
174
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->RobertaPreLayerNorm
175
+ class TFRobertaPreLayerNormPooler(keras.layers.Layer):
176
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
177
+ super().__init__(**kwargs)
178
+
179
+ self.dense = keras.layers.Dense(
180
+ units=config.hidden_size,
181
+ kernel_initializer=get_initializer(config.initializer_range),
182
+ activation="tanh",
183
+ name="dense",
184
+ )
185
+ self.config = config
186
+
187
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
188
+ # We "pool" the model by simply taking the hidden state corresponding
189
+ # to the first token.
190
+ first_token_tensor = hidden_states[:, 0]
191
+ pooled_output = self.dense(inputs=first_token_tensor)
192
+
193
+ return pooled_output
194
+
195
+ def build(self, input_shape=None):
196
+ if self.built:
197
+ return
198
+ self.built = True
199
+ if getattr(self, "dense", None) is not None:
200
+ with tf.name_scope(self.dense.name):
201
+ self.dense.build([None, None, self.config.hidden_size])
202
+
203
+
204
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->RobertaPreLayerNorm
205
+ class TFRobertaPreLayerNormSelfAttention(keras.layers.Layer):
206
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
207
+ super().__init__(**kwargs)
208
+
209
+ if config.hidden_size % config.num_attention_heads != 0:
210
+ raise ValueError(
211
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
212
+ f"of attention heads ({config.num_attention_heads})"
213
+ )
214
+
215
+ self.num_attention_heads = config.num_attention_heads
216
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
217
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
218
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
219
+
220
+ self.query = keras.layers.Dense(
221
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
222
+ )
223
+ self.key = keras.layers.Dense(
224
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
225
+ )
226
+ self.value = keras.layers.Dense(
227
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
228
+ )
229
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
230
+
231
+ self.is_decoder = config.is_decoder
232
+ self.config = config
233
+
234
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
235
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
236
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
237
+
238
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
239
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
240
+
241
+ def call(
242
+ self,
243
+ hidden_states: tf.Tensor,
244
+ attention_mask: tf.Tensor,
245
+ head_mask: tf.Tensor,
246
+ encoder_hidden_states: tf.Tensor,
247
+ encoder_attention_mask: tf.Tensor,
248
+ past_key_value: Tuple[tf.Tensor],
249
+ output_attentions: bool,
250
+ training: bool = False,
251
+ ) -> Tuple[tf.Tensor]:
252
+ batch_size = shape_list(hidden_states)[0]
253
+ mixed_query_layer = self.query(inputs=hidden_states)
254
+
255
+ # If this is instantiated as a cross-attention module, the keys
256
+ # and values come from an encoder; the attention mask needs to be
257
+ # such that the encoder's padding tokens are not attended to.
258
+ is_cross_attention = encoder_hidden_states is not None
259
+
260
+ if is_cross_attention and past_key_value is not None:
261
+ # reuse k,v, cross_attentions
262
+ key_layer = past_key_value[0]
263
+ value_layer = past_key_value[1]
264
+ attention_mask = encoder_attention_mask
265
+ elif is_cross_attention:
266
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
267
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
268
+ attention_mask = encoder_attention_mask
269
+ elif past_key_value is not None:
270
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
271
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
272
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
273
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
274
+ else:
275
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
276
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
277
+
278
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
279
+
280
+ if self.is_decoder:
281
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
282
+ # Further calls to cross_attention layer can then reuse all cross-attention
283
+ # key/value_states (first "if" case)
284
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
285
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
286
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
287
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
288
+ past_key_value = (key_layer, value_layer)
289
+
290
+ # Take the dot product between "query" and "key" to get the raw attention scores.
291
+ # (batch size, num_heads, seq_len_q, seq_len_k)
292
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
293
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
294
+ attention_scores = tf.divide(attention_scores, dk)
295
+
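+ # Illustrative note (not part of the upstream file): the two lines above compute the standard
+ # scaled dot-product scores, scores = Q @ K^T / sqrt(attention_head_size), so the magnitude of the
+ # logits does not grow with the head dimension before the softmax.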
296
+ if attention_mask is not None:
297
+ # Apply the attention mask (precomputed for all layers in the TFRobertaPreLayerNormModel call() function)
298
+ attention_scores = tf.add(attention_scores, attention_mask)
299
+
300
+ # Normalize the attention scores to probabilities.
301
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
302
+
303
+ # This is actually dropping out entire tokens to attend to, which might
304
+ # seem a bit unusual, but is taken from the original Transformer paper.
305
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
306
+
307
+ # Mask heads if we want to
308
+ if head_mask is not None:
309
+ attention_probs = tf.multiply(attention_probs, head_mask)
310
+
311
+ attention_output = tf.matmul(attention_probs, value_layer)
312
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
313
+
314
+ # (batch_size, seq_len_q, all_head_size)
315
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
316
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
317
+
318
+ if self.is_decoder:
319
+ outputs = outputs + (past_key_value,)
320
+ return outputs
321
+
322
+ def build(self, input_shape=None):
323
+ if self.built:
324
+ return
325
+ self.built = True
326
+ if getattr(self, "query", None) is not None:
327
+ with tf.name_scope(self.query.name):
328
+ self.query.build([None, None, self.config.hidden_size])
329
+ if getattr(self, "key", None) is not None:
330
+ with tf.name_scope(self.key.name):
331
+ self.key.build([None, None, self.config.hidden_size])
332
+ if getattr(self, "value", None) is not None:
333
+ with tf.name_scope(self.value.name):
334
+ self.value.build([None, None, self.config.hidden_size])
335
+
336
+
337
+ class TFRobertaPreLayerNormSelfOutput(keras.layers.Layer):
338
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
339
+ super().__init__(**kwargs)
340
+
341
+ self.dense = keras.layers.Dense(
342
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
343
+ )
344
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
345
+ self.config = config
346
+
347
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
348
+ hidden_states = self.dense(inputs=hidden_states)
349
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
350
+ hidden_states = hidden_states + input_tensor
351
+
352
+ return hidden_states
353
+
354
+ def build(self, input_shape=None):
355
+ if self.built:
356
+ return
357
+ self.built = True
358
+ if getattr(self, "dense", None) is not None:
359
+ with tf.name_scope(self.dense.name):
360
+ self.dense.build([None, None, self.config.hidden_size])
361
+
362
+
363
+ class TFRobertaPreLayerNormAttention(keras.layers.Layer):
364
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
365
+ super().__init__(**kwargs)
366
+
367
+ self.self_attention = TFRobertaPreLayerNormSelfAttention(config, name="self")
368
+ self.dense_output = TFRobertaPreLayerNormSelfOutput(config, name="output")
369
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
370
+ self.config = config
371
+
372
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention.prune_heads
373
+ def prune_heads(self, heads):
374
+ raise NotImplementedError
375
+
376
+ def call(
377
+ self,
378
+ input_tensor: tf.Tensor,
379
+ attention_mask: tf.Tensor,
380
+ head_mask: tf.Tensor,
381
+ encoder_hidden_states: tf.Tensor,
382
+ encoder_attention_mask: tf.Tensor,
383
+ past_key_value: Tuple[tf.Tensor],
384
+ output_attentions: bool,
385
+ training: bool = False,
386
+ ) -> Tuple[tf.Tensor]:
387
+ hidden_states_pre_layer_norm = self.LayerNorm(inputs=input_tensor)
388
+ self_outputs = self.self_attention(
389
+ hidden_states=hidden_states_pre_layer_norm,
390
+ attention_mask=attention_mask,
391
+ head_mask=head_mask,
392
+ encoder_hidden_states=encoder_hidden_states,
393
+ encoder_attention_mask=encoder_attention_mask,
394
+ past_key_value=past_key_value,
395
+ output_attentions=output_attentions,
396
+ training=training,
397
+ )
398
+ attention_output = self.dense_output(
399
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
400
+ )
401
+ # add attentions (possibly with past_key_value) if we output them
402
+ outputs = (attention_output,) + self_outputs[1:]
403
+
404
+ return outputs
405
+
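+ # Illustrative note (not part of the upstream file): unlike the post-LayerNorm ordering used in
+ # BERT/RoBERTa, this block normalizes `input_tensor` *before* self-attention and adds the
+ # un-normalized residual back in `dense_output` -- the "pre-LayerNorm" arrangement that gives the
+ # model its name.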
406
+ def build(self, input_shape=None):
407
+ if self.built:
408
+ return
409
+ self.built = True
410
+ if getattr(self, "self_attention", None) is not None:
411
+ with tf.name_scope(self.self_attention.name):
412
+ self.self_attention.build(None)
413
+ if getattr(self, "dense_output", None) is not None:
414
+ with tf.name_scope(self.dense_output.name):
415
+ self.dense_output.build(None)
416
+ if getattr(self, "LayerNorm", None) is not None:
417
+ with tf.name_scope(self.LayerNorm.name):
418
+ self.LayerNorm.build([None, None, self.config.hidden_size])
419
+
420
+
421
+ class TFRobertaPreLayerNormIntermediate(keras.layers.Layer):
422
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
423
+ super().__init__(**kwargs)
424
+
425
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
426
+ self.dense = keras.layers.Dense(
427
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
428
+ )
429
+
430
+ if isinstance(config.hidden_act, str):
431
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
432
+ else:
433
+ self.intermediate_act_fn = config.hidden_act
434
+ self.config = config
435
+
436
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
437
+ hidden_states = self.LayerNorm(inputs=hidden_states)
438
+ hidden_states = self.dense(inputs=hidden_states)
439
+ hidden_states = self.intermediate_act_fn(hidden_states)
440
+
441
+ return hidden_states
442
+
443
+ def build(self, input_shape=None):
444
+ if self.built:
445
+ return
446
+ self.built = True
447
+ if getattr(self, "LayerNorm", None) is not None:
448
+ with tf.name_scope(self.LayerNorm.name):
449
+ self.LayerNorm.build([None, None, self.config.hidden_size])
450
+ if getattr(self, "dense", None) is not None:
451
+ with tf.name_scope(self.dense.name):
452
+ self.dense.build([None, None, self.config.hidden_size])
453
+
454
+
455
+ class TFRobertaPreLayerNormOutput(keras.layers.Layer):
456
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
457
+ super().__init__(**kwargs)
458
+
459
+ self.dense = keras.layers.Dense(
460
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
461
+ )
462
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
463
+ self.config = config
464
+
465
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
466
+ hidden_states = self.dense(inputs=hidden_states)
467
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
468
+ hidden_states = hidden_states + input_tensor
469
+
470
+ return hidden_states
471
+
472
+ def build(self, input_shape=None):
473
+ if self.built:
474
+ return
475
+ self.built = True
476
+ if getattr(self, "dense", None) is not None:
477
+ with tf.name_scope(self.dense.name):
478
+ self.dense.build([None, None, self.config.intermediate_size])
479
+
480
+
481
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->RobertaPreLayerNorm
482
+ class TFRobertaPreLayerNormLayer(keras.layers.Layer):
483
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
484
+ super().__init__(**kwargs)
485
+
486
+ self.attention = TFRobertaPreLayerNormAttention(config, name="attention")
487
+ self.is_decoder = config.is_decoder
488
+ self.add_cross_attention = config.add_cross_attention
489
+ if self.add_cross_attention:
490
+ if not self.is_decoder:
491
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
492
+ self.crossattention = TFRobertaPreLayerNormAttention(config, name="crossattention")
493
+ self.intermediate = TFRobertaPreLayerNormIntermediate(config, name="intermediate")
494
+ self.bert_output = TFRobertaPreLayerNormOutput(config, name="output")
495
+
496
+ def call(
497
+ self,
498
+ hidden_states: tf.Tensor,
499
+ attention_mask: tf.Tensor,
500
+ head_mask: tf.Tensor,
501
+ encoder_hidden_states: tf.Tensor | None,
502
+ encoder_attention_mask: tf.Tensor | None,
503
+ past_key_value: Tuple[tf.Tensor] | None,
504
+ output_attentions: bool,
505
+ training: bool = False,
506
+ ) -> Tuple[tf.Tensor]:
507
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
508
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
509
+ self_attention_outputs = self.attention(
510
+ input_tensor=hidden_states,
511
+ attention_mask=attention_mask,
512
+ head_mask=head_mask,
513
+ encoder_hidden_states=None,
514
+ encoder_attention_mask=None,
515
+ past_key_value=self_attn_past_key_value,
516
+ output_attentions=output_attentions,
517
+ training=training,
518
+ )
519
+ attention_output = self_attention_outputs[0]
520
+
521
+ # if decoder, the last output is tuple of self-attn cache
522
+ if self.is_decoder:
523
+ outputs = self_attention_outputs[1:-1]
524
+ present_key_value = self_attention_outputs[-1]
525
+ else:
526
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
527
+
528
+ cross_attn_present_key_value = None
529
+ if self.is_decoder and encoder_hidden_states is not None:
530
+ if not hasattr(self, "crossattention"):
531
+ raise ValueError(
532
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
533
+ " by setting `config.add_cross_attention=True`"
534
+ )
535
+
536
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
537
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
538
+ cross_attention_outputs = self.crossattention(
539
+ input_tensor=attention_output,
540
+ attention_mask=attention_mask,
541
+ head_mask=head_mask,
542
+ encoder_hidden_states=encoder_hidden_states,
543
+ encoder_attention_mask=encoder_attention_mask,
544
+ past_key_value=cross_attn_past_key_value,
545
+ output_attentions=output_attentions,
546
+ training=training,
547
+ )
548
+ attention_output = cross_attention_outputs[0]
549
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
550
+
551
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
552
+ cross_attn_present_key_value = cross_attention_outputs[-1]
553
+ present_key_value = present_key_value + cross_attn_present_key_value
554
+
555
+ intermediate_output = self.intermediate(hidden_states=attention_output)
556
+ layer_output = self.bert_output(
557
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
558
+ )
559
+ outputs = (layer_output,) + outputs # add attentions if we output them
560
+
561
+ # if decoder, return the attn key/values as the last output
562
+ if self.is_decoder:
563
+ outputs = outputs + (present_key_value,)
564
+
565
+ return outputs
566
+
567
+ def build(self, input_shape=None):
568
+ if self.built:
569
+ return
570
+ self.built = True
571
+ if getattr(self, "attention", None) is not None:
572
+ with tf.name_scope(self.attention.name):
573
+ self.attention.build(None)
574
+ if getattr(self, "intermediate", None) is not None:
575
+ with tf.name_scope(self.intermediate.name):
576
+ self.intermediate.build(None)
577
+ if getattr(self, "bert_output", None) is not None:
578
+ with tf.name_scope(self.bert_output.name):
579
+ self.bert_output.build(None)
580
+ if getattr(self, "crossattention", None) is not None:
581
+ with tf.name_scope(self.crossattention.name):
582
+ self.crossattention.build(None)
583
+
584
+
585
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->RobertaPreLayerNorm
586
+ class TFRobertaPreLayerNormEncoder(keras.layers.Layer):
587
+ def __init__(self, config: RobertaPreLayerNormConfig, **kwargs):
588
+ super().__init__(**kwargs)
589
+ self.config = config
590
+ self.layer = [TFRobertaPreLayerNormLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
591
+
592
+ def call(
593
+ self,
594
+ hidden_states: tf.Tensor,
595
+ attention_mask: tf.Tensor,
596
+ head_mask: tf.Tensor,
597
+ encoder_hidden_states: tf.Tensor | None,
598
+ encoder_attention_mask: tf.Tensor | None,
599
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
600
+ use_cache: Optional[bool],
601
+ output_attentions: bool,
602
+ output_hidden_states: bool,
603
+ return_dict: bool,
604
+ training: bool = False,
605
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
606
+ all_hidden_states = () if output_hidden_states else None
607
+ all_attentions = () if output_attentions else None
608
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
609
+
610
+ next_decoder_cache = () if use_cache else None
611
+ for i, layer_module in enumerate(self.layer):
612
+ if output_hidden_states:
613
+ all_hidden_states = all_hidden_states + (hidden_states,)
614
+
615
+ past_key_value = past_key_values[i] if past_key_values is not None else None
616
+
617
+ layer_outputs = layer_module(
618
+ hidden_states=hidden_states,
619
+ attention_mask=attention_mask,
620
+ head_mask=head_mask[i],
621
+ encoder_hidden_states=encoder_hidden_states,
622
+ encoder_attention_mask=encoder_attention_mask,
623
+ past_key_value=past_key_value,
624
+ output_attentions=output_attentions,
625
+ training=training,
626
+ )
627
+ hidden_states = layer_outputs[0]
628
+
629
+ if use_cache:
630
+ next_decoder_cache += (layer_outputs[-1],)
631
+
632
+ if output_attentions:
633
+ all_attentions = all_attentions + (layer_outputs[1],)
634
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
635
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
636
+
637
+ # Add last layer
638
+ if output_hidden_states:
639
+ all_hidden_states = all_hidden_states + (hidden_states,)
640
+
641
+ if not return_dict:
642
+ return tuple(
643
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
644
+ )
645
+
646
+ return TFBaseModelOutputWithPastAndCrossAttentions(
647
+ last_hidden_state=hidden_states,
648
+ past_key_values=next_decoder_cache,
649
+ hidden_states=all_hidden_states,
650
+ attentions=all_attentions,
651
+ cross_attentions=all_cross_attentions,
652
+ )
653
+
654
+ def build(self, input_shape=None):
655
+ if self.built:
656
+ return
657
+ self.built = True
658
+ if getattr(self, "layer", None) is not None:
659
+ for layer in self.layer:
660
+ with tf.name_scope(layer.name):
661
+ layer.build(None)
662
+
663
+
664
+ @keras_serializable
665
+ class TFRobertaPreLayerNormMainLayer(keras.layers.Layer):
666
+ config_class = RobertaPreLayerNormConfig
667
+
668
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
669
+ super().__init__(**kwargs)
670
+
671
+ self.config = config
672
+ self.is_decoder = config.is_decoder
673
+
674
+ self.num_hidden_layers = config.num_hidden_layers
675
+ self.initializer_range = config.initializer_range
676
+ self.output_attentions = config.output_attentions
677
+ self.output_hidden_states = config.output_hidden_states
678
+ self.return_dict = config.use_return_dict
679
+ self.encoder = TFRobertaPreLayerNormEncoder(config, name="encoder")
680
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
681
+ self.pooler = TFRobertaPreLayerNormPooler(config, name="pooler") if add_pooling_layer else None
682
+ # The embeddings must be the last declaration in order to follow the weights order
683
+ self.embeddings = TFRobertaPreLayerNormEmbeddings(config, name="embeddings")
684
+
685
+ def get_input_embeddings(self) -> keras.layers.Layer:
686
+ return self.embeddings
687
+
688
+ def set_input_embeddings(self, value: tf.Variable):
689
+ self.embeddings.weight = value
690
+ self.embeddings.vocab_size = shape_list(value)[0]
691
+
692
+ def _prune_heads(self, heads_to_prune):
693
+ """
694
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
695
+ class PreTrainedModel
696
+ """
697
+ raise NotImplementedError
698
+
699
+ @unpack_inputs
700
+ def call(
701
+ self,
702
+ input_ids: TFModelInputType | None = None,
703
+ attention_mask: np.ndarray | tf.Tensor | None = None,
704
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
705
+ position_ids: np.ndarray | tf.Tensor | None = None,
706
+ head_mask: np.ndarray | tf.Tensor | None = None,
707
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
708
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
709
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
710
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
711
+ use_cache: Optional[bool] = None,
712
+ output_attentions: Optional[bool] = None,
713
+ output_hidden_states: Optional[bool] = None,
714
+ return_dict: Optional[bool] = None,
715
+ training: bool = False,
716
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
717
+ if not self.config.is_decoder:
718
+ use_cache = False
719
+
720
+ if input_ids is not None and inputs_embeds is not None:
721
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
722
+ elif input_ids is not None:
723
+ input_shape = shape_list(input_ids)
724
+ elif inputs_embeds is not None:
725
+ input_shape = shape_list(inputs_embeds)[:-1]
726
+ else:
727
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
728
+
729
+ batch_size, seq_length = input_shape
730
+
731
+ if past_key_values is None:
732
+ past_key_values_length = 0
733
+ past_key_values = [None] * len(self.encoder.layer)
734
+ else:
735
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
736
+
737
+ if attention_mask is None:
738
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
739
+
740
+ if token_type_ids is None:
741
+ token_type_ids = tf.fill(dims=input_shape, value=0)
742
+
743
+ embedding_output = self.embeddings(
744
+ input_ids=input_ids,
745
+ position_ids=position_ids,
746
+ token_type_ids=token_type_ids,
747
+ inputs_embeds=inputs_embeds,
748
+ past_key_values_length=past_key_values_length,
749
+ training=training,
750
+ )
751
+
752
+ # We create a 3D attention mask from a 2D tensor mask.
753
+ # Sizes are [batch_size, 1, 1, to_seq_length]
754
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
755
+ # This attention mask is simpler than the triangular masking of causal attention
756
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
757
+ attention_mask_shape = shape_list(attention_mask)
758
+
759
+ mask_seq_length = seq_length + past_key_values_length
760
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
761
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
762
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
763
+ if self.is_decoder:
764
+ seq_ids = tf.range(mask_seq_length)
765
+ causal_mask = tf.less_equal(
766
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
767
+ seq_ids[None, :, None],
768
+ )
769
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
770
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
771
+ attention_mask_shape = shape_list(extended_attention_mask)
772
+ extended_attention_mask = tf.reshape(
773
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
774
+ )
775
+ if past_key_values[0] is not None:
776
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
777
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
778
+ else:
779
+ extended_attention_mask = tf.reshape(
780
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
781
+ )
782
+
783
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
784
+ # masked positions, this operation will create a tensor which is 0.0 for
785
+ # positions we want to attend and -10000.0 for masked positions.
786
+ # Since we are adding it to the raw scores before the softmax, this is
787
+ # effectively the same as removing these entirely.
788
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
789
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
790
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
791
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
792
+
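+ # Illustrative example (not part of the upstream file): a padding mask row [1, 1, 0] becomes
+ # (1 - [1, 1, 0]) * -10000 = [0.0, 0.0, -10000.0], which is added to the attention scores so the
+ # masked position contributes (almost) nothing after the softmax.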
793
+ if self.is_decoder and encoder_attention_mask is not None:
794
+ # If a 2D or 3D attention mask is provided for the cross-attention,
795
+ # we need to make it broadcastable to
796
+ # [batch_size, num_heads, mask_seq_length, mask_seq_length]
797
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
798
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
799
+ if num_dims_encoder_attention_mask == 3:
800
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
801
+ if num_dims_encoder_attention_mask == 2:
802
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
803
+
804
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
805
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
806
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
807
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
808
+
809
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
810
+ else:
811
+ encoder_extended_attention_mask = None
812
+
813
+ # Prepare head mask if needed
814
+ # 1.0 in head_mask indicate we keep the head
815
+ # attention_probs has shape bsz x n_heads x N x N
816
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
817
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
818
+ if head_mask is not None:
819
+ raise NotImplementedError
820
+ else:
821
+ head_mask = [None] * self.config.num_hidden_layers
822
+
823
+ encoder_outputs = self.encoder(
824
+ hidden_states=embedding_output,
825
+ attention_mask=extended_attention_mask,
826
+ head_mask=head_mask,
827
+ encoder_hidden_states=encoder_hidden_states,
828
+ encoder_attention_mask=encoder_extended_attention_mask,
829
+ past_key_values=past_key_values,
830
+ use_cache=use_cache,
831
+ output_attentions=output_attentions,
832
+ output_hidden_states=output_hidden_states,
833
+ return_dict=return_dict,
834
+ training=training,
835
+ )
836
+
837
+ sequence_output = encoder_outputs[0]
838
+ sequence_output = self.LayerNorm(inputs=sequence_output)
839
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
840
+
841
+ if not return_dict:
842
+ return (
843
+ sequence_output,
844
+ pooled_output,
845
+ ) + encoder_outputs[1:]
846
+
847
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
848
+ last_hidden_state=sequence_output,
849
+ pooler_output=pooled_output,
850
+ past_key_values=encoder_outputs.past_key_values,
851
+ hidden_states=encoder_outputs.hidden_states,
852
+ attentions=encoder_outputs.attentions,
853
+ cross_attentions=encoder_outputs.cross_attentions,
854
+ )
855
+
856
+ def build(self, input_shape=None):
857
+ if self.built:
858
+ return
859
+ self.built = True
860
+ if getattr(self, "encoder", None) is not None:
861
+ with tf.name_scope(self.encoder.name):
862
+ self.encoder.build(None)
863
+ if getattr(self, "LayerNorm", None) is not None:
864
+ with tf.name_scope(self.LayerNorm.name):
865
+ self.LayerNorm.build([None, None, self.config.hidden_size])
866
+ if getattr(self, "pooler", None) is not None:
867
+ with tf.name_scope(self.pooler.name):
868
+ self.pooler.build(None)
869
+ if getattr(self, "embeddings", None) is not None:
870
+ with tf.name_scope(self.embeddings.name):
871
+ self.embeddings.build(None)
872
+
873
+
874
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaPreTrainedModel with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
875
+ class TFRobertaPreLayerNormPreTrainedModel(TFPreTrainedModel):
876
+ """
877
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
878
+ models.
879
+ """
880
+
881
+ config_class = RobertaPreLayerNormConfig
882
+ base_model_prefix = "roberta_prelayernorm"
883
+
884
+
885
+ ROBERTA_PRELAYERNORM_START_DOCSTRING = r"""
886
+
887
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
888
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
889
+ etc.)
890
+
891
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
892
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
893
+ behavior.
894
+
895
+ <Tip>
896
+
897
+ TensorFlow models and layers in `transformers` accept two formats as input:
898
+
899
+ - having all inputs as keyword arguments (like PyTorch models), or
900
+ - having all inputs as a list, tuple or dict in the first positional argument.
901
+
902
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
903
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
904
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
905
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
906
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
907
+ positional argument:
908
+
909
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
910
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
911
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
912
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
913
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
914
+
915
+ Note that when creating models and layers with
916
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
917
+ about any of this, as you can just pass inputs like you would to any other Python function!
918
+
919
+ </Tip>
920
+
921
+ Parameters:
922
+ config ([`RobertaPreLayerNormConfig`]): Model configuration class with all the parameters of the
923
+ model. Initializing with a config file does not load the weights associated with the model, only the
924
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
925
+ """
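+ # Illustrative sketch (not in the original source) of the three input formats the
+ # docstring above describes, assuming `model` is a built TFRobertaPreLayerNormModel and
+ # `input_ids` / `attention_mask` are `tf.Tensor`s of shape (batch_size, sequence_length):
+ #     outputs = model(input_ids)                                   # single tensor
+ #     outputs = model([input_ids, attention_mask])                 # list, in docstring order
+ #     outputs = model({"input_ids": input_ids, "attention_mask": attention_mask})  # dict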
926
+
927
+ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING = r"""
928
+ Args:
929
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
930
+ Indices of input sequence tokens in the vocabulary.
931
+
932
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
933
+ [`PreTrainedTokenizer.encode`] for details.
934
+
935
+ [What are input IDs?](../glossary#input-ids)
936
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
937
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
938
+
939
+ - 1 for tokens that are **not masked**,
940
+ - 0 for tokens that are **masked**.
941
+
942
+ [What are attention masks?](../glossary#attention-mask)
943
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
944
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
945
+ 1]`:
946
+
947
+ - 0 corresponds to a *sentence A* token,
948
+ - 1 corresponds to a *sentence B* token.
949
+
950
+ [What are token type IDs?](../glossary#token-type-ids)
951
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
952
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
953
+ config.max_position_embeddings - 1]`.
954
+
955
+ [What are position IDs?](../glossary#position-ids)
956
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
957
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
958
+
959
+ - 1 indicates the head is **not masked**,
960
+ - 0 indicates the head is **masked**.
961
+
962
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
963
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
964
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
965
+ model's internal embedding lookup matrix.
966
+ output_attentions (`bool`, *optional*):
967
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
968
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
969
+ config will be used instead.
970
+ output_hidden_states (`bool`, *optional*):
971
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
972
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
973
+ used instead.
974
+ return_dict (`bool`, *optional*):
975
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
976
+ eager mode, in graph mode the value will always be set to True.
977
+ training (`bool`, *optional*, defaults to `False`):
978
+ Whether or not to use the model in training mode (some modules like dropout modules have different
979
+ behaviors between training and evaluation).
980
+ """
981
+
982
+
983
+ @add_start_docstrings(
984
+ "The bare RoBERTa-PreLayerNorm Model transformer outputting raw hidden-states without any specific head on top.",
985
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
986
+ )
987
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaModel with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
988
+ class TFRobertaPreLayerNormModel(TFRobertaPreLayerNormPreTrainedModel):
989
+ def __init__(self, config, *inputs, **kwargs):
990
+ super().__init__(config, *inputs, **kwargs)
991
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(config, name="roberta_prelayernorm")
992
+
993
+ @unpack_inputs
994
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
995
+ @add_code_sample_docstrings(
996
+ checkpoint=_CHECKPOINT_FOR_DOC,
997
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
998
+ config_class=_CONFIG_FOR_DOC,
999
+ )
1000
+ def call(
1001
+ self,
1002
+ input_ids: TFModelInputType | None = None,
1003
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1004
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1005
+ position_ids: np.ndarray | tf.Tensor | None = None,
1006
+ head_mask: np.ndarray | tf.Tensor | None = None,
1007
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1008
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1009
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1010
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1011
+ use_cache: Optional[bool] = None,
1012
+ output_attentions: Optional[bool] = None,
1013
+ output_hidden_states: Optional[bool] = None,
1014
+ return_dict: Optional[bool] = None,
1015
+ training: Optional[bool] = False,
1016
+ ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]:
1017
+ r"""
1018
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1019
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1020
+ the model is configured as a decoder.
1021
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1022
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1023
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1024
+
1025
+ - 1 for tokens that are **not masked**,
1026
+ - 0 for tokens that are **masked**.
1027
+
1028
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1029
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1030
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1031
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1032
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1033
+ use_cache (`bool`, *optional*, defaults to `True`):
1034
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1035
+ `past_key_values`). Set to `False` during training, `True` during generation.
1036
+ """
1037
+ outputs = self.roberta_prelayernorm(
1038
+ input_ids=input_ids,
1039
+ attention_mask=attention_mask,
1040
+ token_type_ids=token_type_ids,
1041
+ position_ids=position_ids,
1042
+ head_mask=head_mask,
1043
+ inputs_embeds=inputs_embeds,
1044
+ encoder_hidden_states=encoder_hidden_states,
1045
+ encoder_attention_mask=encoder_attention_mask,
1046
+ past_key_values=past_key_values,
1047
+ use_cache=use_cache,
1048
+ output_attentions=output_attentions,
1049
+ output_hidden_states=output_hidden_states,
1050
+ return_dict=return_dict,
1051
+ training=training,
1052
+ )
1053
+
1054
+ return outputs
1055
+
1056
+ def build(self, input_shape=None):
1057
+ if self.built:
1058
+ return
1059
+ self.built = True
1060
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1061
+ with tf.name_scope(self.roberta_prelayernorm.name):
1062
+ self.roberta_prelayernorm.build(None)
1063
+
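+ # Illustrative usage sketch, not part of the original file. The checkpoint name is an
+ # assumption (the example checkpoint referenced in this model's documentation); swap in
+ # whichever RoBERTa-PreLayerNorm checkpoint you actually use.
+ def _tf_roberta_prelayernorm_usage_sketch():
+     from transformers import AutoTokenizer, TFRobertaPreLayerNormModel
+
+     tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
+     model = TFRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
+     inputs = tokenizer("Hello, world!", return_tensors="tf")
+     outputs = model(**inputs)
+     return outputs.last_hidden_state  # shape: (batch_size, sequence_length, hidden_size)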
1064
+
1065
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->RobertaPreLayerNorm
1066
+ class TFRobertaPreLayerNormLMHead(keras.layers.Layer):
1067
+ """RobertaPreLayerNorm Head for masked language modeling."""
1068
+
1069
+ def __init__(self, config, input_embeddings, **kwargs):
1070
+ super().__init__(**kwargs)
1071
+
1072
+ self.config = config
1073
+ self.hidden_size = config.hidden_size
1074
+ self.dense = keras.layers.Dense(
1075
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
1076
+ )
1077
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1078
+ self.act = get_tf_activation("gelu")
1079
+
1080
+ # The output weights are the same as the input embeddings, but there is
1081
+ # an output-only bias for each token.
1082
+ self.decoder = input_embeddings
1083
+
1084
+ def build(self, input_shape=None):
1085
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1086
+
1087
+ if self.built:
1088
+ return
1089
+ self.built = True
1090
+ if getattr(self, "dense", None) is not None:
1091
+ with tf.name_scope(self.dense.name):
1092
+ self.dense.build([None, None, self.config.hidden_size])
1093
+ if getattr(self, "layer_norm", None) is not None:
1094
+ with tf.name_scope(self.layer_norm.name):
1095
+ self.layer_norm.build([None, None, self.config.hidden_size])
1096
+
1097
+ def get_output_embeddings(self):
1098
+ return self.decoder
1099
+
1100
+ def set_output_embeddings(self, value):
1101
+ self.decoder.weight = value
1102
+ self.decoder.vocab_size = shape_list(value)[0]
1103
+
1104
+ def get_bias(self):
1105
+ return {"bias": self.bias}
1106
+
1107
+ def set_bias(self, value):
1108
+ self.bias = value["bias"]
1109
+ self.config.vocab_size = shape_list(value["bias"])[0]
1110
+
1111
+ def call(self, hidden_states):
1112
+ hidden_states = self.dense(hidden_states)
1113
+ hidden_states = self.act(hidden_states)
1114
+ hidden_states = self.layer_norm(hidden_states)
1115
+
1116
+ # project back to size of vocabulary with bias
1117
+ seq_length = shape_list(tensor=hidden_states)[1]
1118
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1119
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
1120
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1121
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1122
+
1123
+ return hidden_states
1124
+
1125
+
1126
+ @add_start_docstrings(
1127
+ """RoBERTa-PreLayerNorm Model with a `language modeling` head on top.""", ROBERTA_PRELAYERNORM_START_DOCSTRING
1128
+ )
1129
+ class TFRobertaPreLayerNormForMaskedLM(TFRobertaPreLayerNormPreTrainedModel, TFMaskedLanguageModelingLoss):
1130
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1131
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
1132
+
1133
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM.__init__ with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1134
+ def __init__(self, config, *inputs, **kwargs):
1135
+ super().__init__(config, *inputs, **kwargs)
1136
+
1137
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1138
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1139
+ )
1140
+ self.lm_head = TFRobertaPreLayerNormLMHead(config, self.roberta_prelayernorm.embeddings, name="lm_head")
1141
+
1142
+ def get_lm_head(self):
1143
+ return self.lm_head
1144
+
1145
+ def get_prefix_bias_name(self):
1146
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1147
+ return self.name + "/" + self.lm_head.name
1148
+
1149
+ @unpack_inputs
1150
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1151
+ @add_code_sample_docstrings(
1152
+ checkpoint=_CHECKPOINT_FOR_DOC,
1153
+ output_type=TFMaskedLMOutput,
1154
+ config_class=_CONFIG_FOR_DOC,
1155
+ mask="<mask>",
1156
+ expected_output="' Paris'",
1157
+ expected_loss=0.69,
1158
+ )
1159
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM.call with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1160
+ def call(
1161
+ self,
1162
+ input_ids: TFModelInputType | None = None,
1163
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1164
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1165
+ position_ids: np.ndarray | tf.Tensor | None = None,
1166
+ head_mask: np.ndarray | tf.Tensor | None = None,
1167
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1168
+ output_attentions: Optional[bool] = None,
1169
+ output_hidden_states: Optional[bool] = None,
1170
+ return_dict: Optional[bool] = None,
1171
+ labels: np.ndarray | tf.Tensor | None = None,
1172
+ training: Optional[bool] = False,
1173
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1174
+ r"""
1175
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1176
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1177
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1178
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1179
+ """
1180
+ outputs = self.roberta_prelayernorm(
1181
+ input_ids,
1182
+ attention_mask=attention_mask,
1183
+ token_type_ids=token_type_ids,
1184
+ position_ids=position_ids,
1185
+ head_mask=head_mask,
1186
+ inputs_embeds=inputs_embeds,
1187
+ output_attentions=output_attentions,
1188
+ output_hidden_states=output_hidden_states,
1189
+ return_dict=return_dict,
1190
+ training=training,
1191
+ )
1192
+
1193
+ sequence_output = outputs[0]
1194
+ prediction_scores = self.lm_head(sequence_output)
1195
+
1196
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1197
+
1198
+ if not return_dict:
1199
+ output = (prediction_scores,) + outputs[2:]
1200
+ return ((loss,) + output) if loss is not None else output
1201
+
1202
+ return TFMaskedLMOutput(
1203
+ loss=loss,
1204
+ logits=prediction_scores,
1205
+ hidden_states=outputs.hidden_states,
1206
+ attentions=outputs.attentions,
1207
+ )
1208
+
1209
+ def build(self, input_shape=None):
1210
+ if self.built:
1211
+ return
1212
+ self.built = True
1213
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1214
+ with tf.name_scope(self.roberta_prelayernorm.name):
1215
+ self.roberta_prelayernorm.build(None)
1216
+ if getattr(self, "lm_head", None) is not None:
1217
+ with tf.name_scope(self.lm_head.name):
1218
+ self.lm_head.build(None)
1219
+
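+ # Illustrative fill-mask sketch, not part of the original file; it mirrors the documented
+ # example above (mask token "<mask>", expected completion " Paris"). The checkpoint name
+ # is an assumption taken from the example checkpoint used in these docstrings.
+ def _tf_roberta_prelayernorm_fill_mask_sketch():
+     import tensorflow as tf
+     from transformers import AutoTokenizer, TFRobertaPreLayerNormForMaskedLM
+
+     tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
+     model = TFRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
+     inputs = tokenizer("The capital of France is <mask>.", return_tensors="tf")
+     logits = model(**inputs).logits
+     # locate the <mask> position and take the highest-scoring vocabulary id there
+     mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
+     predicted_id = int(tf.argmax(logits[0, mask_index]))
+     return tokenizer.decode([predicted_id])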
1220
+
1221
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForCausalLM with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1222
+ class TFRobertaPreLayerNormForCausalLM(TFRobertaPreLayerNormPreTrainedModel, TFCausalLanguageModelingLoss):
1223
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1224
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
1225
+
1226
+ def __init__(self, config: RobertaPreLayerNormConfig, *inputs, **kwargs):
1227
+ super().__init__(config, *inputs, **kwargs)
1228
+
1229
+ if not config.is_decoder:
1230
+ logger.warning(
1231
+ "If you want to use `TFRobertaPreLayerNormForCausalLM` as a standalone, add `is_decoder=True`."
1232
+ )
1233
+
1234
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1235
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1236
+ )
1237
+ self.lm_head = TFRobertaPreLayerNormLMHead(
1238
+ config, input_embeddings=self.roberta_prelayernorm.embeddings, name="lm_head"
1239
+ )
1240
+
1241
+ def get_lm_head(self):
1242
+ return self.lm_head
1243
+
1244
+ def get_prefix_bias_name(self):
1245
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1246
+ return self.name + "/" + self.lm_head.name
1247
+
1248
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation
1249
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1250
+ input_shape = input_ids.shape
1251
+ # if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
1252
+ if attention_mask is None:
1253
+ attention_mask = tf.ones(input_shape)
1254
+
1255
+ # cut decoder_input_ids if past is used
1256
+ if past_key_values is not None:
1257
+ input_ids = input_ids[:, -1:]
1258
+
1259
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
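+ # Illustrative note (not in the original source): once past_key_values holds cached
+ # states, only the most recent token id is passed on (shape (batch_size, 1)); the
+ # cached keys/values stand in for all earlier positions during generation.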
1260
+
1261
+ @unpack_inputs
1262
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1263
+ @add_code_sample_docstrings(
1264
+ checkpoint=_CHECKPOINT_FOR_DOC,
1265
+ output_type=TFCausalLMOutputWithCrossAttentions,
1266
+ config_class=_CONFIG_FOR_DOC,
1267
+ )
1268
+ def call(
1269
+ self,
1270
+ input_ids: TFModelInputType | None = None,
1271
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1272
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1273
+ position_ids: np.ndarray | tf.Tensor | None = None,
1274
+ head_mask: np.ndarray | tf.Tensor | None = None,
1275
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1276
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1277
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1278
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1279
+ use_cache: Optional[bool] = None,
1280
+ output_attentions: Optional[bool] = None,
1281
+ output_hidden_states: Optional[bool] = None,
1282
+ return_dict: Optional[bool] = None,
1283
+ labels: np.ndarray | tf.Tensor | None = None,
1284
+ training: Optional[bool] = False,
1285
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
1286
+ r"""
1287
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1288
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1289
+ the model is configured as a decoder.
1290
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1291
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1292
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1293
+
1294
+ - 1 for tokens that are **not masked**,
1295
+ - 0 for tokens that are **masked**.
1296
+
1297
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1298
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1299
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1300
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1301
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1302
+ use_cache (`bool`, *optional*, defaults to `True`):
1303
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1304
+ `past_key_values`). Set to `False` during training, `True` during generation.
1305
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1306
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1307
+ config.vocab_size - 1]`.
1308
+ """
1309
+ outputs = self.roberta_prelayernorm(
1310
+ input_ids=input_ids,
1311
+ attention_mask=attention_mask,
1312
+ token_type_ids=token_type_ids,
1313
+ position_ids=position_ids,
1314
+ head_mask=head_mask,
1315
+ inputs_embeds=inputs_embeds,
1316
+ encoder_hidden_states=encoder_hidden_states,
1317
+ encoder_attention_mask=encoder_attention_mask,
1318
+ past_key_values=past_key_values,
1319
+ use_cache=use_cache,
1320
+ output_attentions=output_attentions,
1321
+ output_hidden_states=output_hidden_states,
1322
+ return_dict=return_dict,
1323
+ training=training,
1324
+ )
1325
+
1326
+ sequence_output = outputs[0]
1327
+ logits = self.lm_head(hidden_states=sequence_output, training=training)
1328
+ loss = None
1329
+
1330
+ if labels is not None:
1331
+ # shift labels to the left and cut last logit token
1332
+ shifted_logits = logits[:, :-1]
1333
+ labels = labels[:, 1:]
1334
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
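+ # Illustrative note (not in the original source): for tokens [t0, t1, t2, t3] the
+ # logits at positions [t0, t1, t2] are scored against labels [t1, t2, t3], i.e.
+ # each position is trained to predict the next token.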
1335
+
1336
+ if not return_dict:
1337
+ output = (logits,) + outputs[2:]
1338
+ return ((loss,) + output) if loss is not None else output
1339
+
1340
+ return TFCausalLMOutputWithCrossAttentions(
1341
+ loss=loss,
1342
+ logits=logits,
1343
+ past_key_values=outputs.past_key_values,
1344
+ hidden_states=outputs.hidden_states,
1345
+ attentions=outputs.attentions,
1346
+ cross_attentions=outputs.cross_attentions,
1347
+ )
1348
+
1349
+ def build(self, input_shape=None):
1350
+ if self.built:
1351
+ return
1352
+ self.built = True
1353
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1354
+ with tf.name_scope(self.roberta_prelayernorm.name):
1355
+ self.roberta_prelayernorm.build(None)
1356
+ if getattr(self, "lm_head", None) is not None:
1357
+ with tf.name_scope(self.lm_head.name):
1358
+ self.lm_head.build(None)
1359
+
1360
+
1361
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaClassificationHead with Roberta->RobertaPreLayerNorm
1362
+ class TFRobertaPreLayerNormClassificationHead(keras.layers.Layer):
1363
+ """Head for sentence-level classification tasks."""
1364
+
1365
+ def __init__(self, config, **kwargs):
1366
+ super().__init__(**kwargs)
1367
+ self.dense = keras.layers.Dense(
1368
+ config.hidden_size,
1369
+ kernel_initializer=get_initializer(config.initializer_range),
1370
+ activation="tanh",
1371
+ name="dense",
1372
+ )
1373
+ classifier_dropout = (
1374
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1375
+ )
1376
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1377
+ self.out_proj = keras.layers.Dense(
1378
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
1379
+ )
1380
+ self.config = config
1381
+
1382
+ def call(self, features, training=False):
1383
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1384
+ x = self.dropout(x, training=training)
1385
+ x = self.dense(x)
1386
+ x = self.dropout(x, training=training)
1387
+ x = self.out_proj(x)
1388
+ return x
1389
+
1390
+ def build(self, input_shape=None):
1391
+ if self.built:
1392
+ return
1393
+ self.built = True
1394
+ if getattr(self, "dense", None) is not None:
1395
+ with tf.name_scope(self.dense.name):
1396
+ self.dense.build([None, None, self.config.hidden_size])
1397
+ if getattr(self, "out_proj", None) is not None:
1398
+ with tf.name_scope(self.out_proj.name):
1399
+ self.out_proj.build([None, None, self.config.hidden_size])
1400
+
1401
+
1402
+ @add_start_docstrings(
1403
+ """
1404
+ RoBERTa-PreLayerNorm Model transformer with a sequence classification/regression head on top (a linear layer on top
1405
+ of the pooled output) e.g. for GLUE tasks.
1406
+ """,
1407
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1408
+ )
1409
+ class TFRobertaPreLayerNormForSequenceClassification(
1410
+ TFRobertaPreLayerNormPreTrainedModel, TFSequenceClassificationLoss
1411
+ ):
1412
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1413
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1414
+
1415
+ def __init__(self, config, *inputs, **kwargs):
1416
+ super().__init__(config, *inputs, **kwargs)
1417
+ self.num_labels = config.num_labels
1418
+
1419
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1420
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1421
+ )
1422
+ self.classifier = TFRobertaPreLayerNormClassificationHead(config, name="classifier")
1423
+
1424
+ @unpack_inputs
1425
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1426
+ @add_code_sample_docstrings(
1427
+ checkpoint=_CHECKPOINT_FOR_DOC,
1428
+ output_type=TFSequenceClassifierOutput,
1429
+ config_class=_CONFIG_FOR_DOC,
1430
+ )
1431
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForSequenceClassification.call with roberta->roberta_prelayernorm
1432
+ def call(
1433
+ self,
1434
+ input_ids: TFModelInputType | None = None,
1435
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1436
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1437
+ position_ids: np.ndarray | tf.Tensor | None = None,
1438
+ head_mask: np.ndarray | tf.Tensor | None = None,
1439
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1440
+ output_attentions: Optional[bool] = None,
1441
+ output_hidden_states: Optional[bool] = None,
1442
+ return_dict: Optional[bool] = None,
1443
+ labels: np.ndarray | tf.Tensor | None = None,
1444
+ training: Optional[bool] = False,
1445
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1446
+ r"""
1447
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1448
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1449
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1450
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1451
+ """
1452
+ outputs = self.roberta_prelayernorm(
1453
+ input_ids,
1454
+ attention_mask=attention_mask,
1455
+ token_type_ids=token_type_ids,
1456
+ position_ids=position_ids,
1457
+ head_mask=head_mask,
1458
+ inputs_embeds=inputs_embeds,
1459
+ output_attentions=output_attentions,
1460
+ output_hidden_states=output_hidden_states,
1461
+ return_dict=return_dict,
1462
+ training=training,
1463
+ )
1464
+ sequence_output = outputs[0]
1465
+ logits = self.classifier(sequence_output, training=training)
1466
+
1467
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1468
+
1469
+ if not return_dict:
1470
+ output = (logits,) + outputs[2:]
1471
+ return ((loss,) + output) if loss is not None else output
1472
+
1473
+ return TFSequenceClassifierOutput(
1474
+ loss=loss,
1475
+ logits=logits,
1476
+ hidden_states=outputs.hidden_states,
1477
+ attentions=outputs.attentions,
1478
+ )
1479
+
1480
+ def build(self, input_shape=None):
1481
+ if self.built:
1482
+ return
1483
+ self.built = True
1484
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1485
+ with tf.name_scope(self.roberta_prelayernorm.name):
1486
+ self.roberta_prelayernorm.build(None)
1487
+ if getattr(self, "classifier", None) is not None:
1488
+ with tf.name_scope(self.classifier.name):
1489
+ self.classifier.build(None)
1490
+
1491
+
1492
+ @add_start_docstrings(
1493
+ """
1494
+ RobertaPreLayerNorm Model with a multiple choice classification head on top (a linear layer on top of the pooled
1495
+ output and a softmax) e.g. for RocStories/SWAG tasks.
1496
+ """,
1497
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1498
+ )
1499
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMultipleChoice with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm
1500
+ class TFRobertaPreLayerNormForMultipleChoice(TFRobertaPreLayerNormPreTrainedModel, TFMultipleChoiceLoss):
1501
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1502
+ _keys_to_ignore_on_load_unexpected = [r"lm_head"]
1503
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1504
+
1505
+ def __init__(self, config, *inputs, **kwargs):
1506
+ super().__init__(config, *inputs, **kwargs)
1507
+
1508
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(config, name="roberta_prelayernorm")
1509
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1510
+ self.classifier = keras.layers.Dense(
1511
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1512
+ )
1513
+ self.config = config
1514
+
1515
+ @unpack_inputs
1516
+ @add_start_docstrings_to_model_forward(
1517
+ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1518
+ )
1519
+ @add_code_sample_docstrings(
1520
+ checkpoint=_CHECKPOINT_FOR_DOC,
1521
+ output_type=TFMultipleChoiceModelOutput,
1522
+ config_class=_CONFIG_FOR_DOC,
1523
+ )
1524
+ def call(
1525
+ self,
1526
+ input_ids: TFModelInputType | None = None,
1527
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1528
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1529
+ position_ids: np.ndarray | tf.Tensor | None = None,
1530
+ head_mask: np.ndarray | tf.Tensor | None = None,
1531
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1532
+ output_attentions: Optional[bool] = None,
1533
+ output_hidden_states: Optional[bool] = None,
1534
+ return_dict: Optional[bool] = None,
1535
+ labels: np.ndarray | tf.Tensor | None = None,
1536
+ training: Optional[bool] = False,
1537
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1538
+ r"""
1539
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1540
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
1541
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1542
+ """
1543
+
1544
+ if input_ids is not None:
1545
+ num_choices = shape_list(input_ids)[1]
1546
+ seq_length = shape_list(input_ids)[2]
1547
+ else:
1548
+ num_choices = shape_list(inputs_embeds)[1]
1549
+ seq_length = shape_list(inputs_embeds)[2]
1550
+
1551
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1552
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1553
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1554
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1555
+ outputs = self.roberta_prelayernorm(
1556
+ flat_input_ids,
1557
+ flat_attention_mask,
1558
+ flat_token_type_ids,
1559
+ flat_position_ids,
1560
+ head_mask,
1561
+ inputs_embeds,
1562
+ output_attentions,
1563
+ output_hidden_states,
1564
+ return_dict=return_dict,
1565
+ training=training,
1566
+ )
1567
+ pooled_output = outputs[1]
1568
+ pooled_output = self.dropout(pooled_output, training=training)
1569
+ logits = self.classifier(pooled_output)
1570
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
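+ # Illustrative note (not in the original source): with batch_size = 2 and
+ # num_choices = 4 the encoder above sees 8 flattened sequences; the 8 per-choice
+ # scores are then reshaped back to (2, 4) so the loss is a softmax over the 4 choices.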
1571
+
1572
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1573
+
1574
+ if not return_dict:
1575
+ output = (reshaped_logits,) + outputs[2:]
1576
+ return ((loss,) + output) if loss is not None else output
1577
+
1578
+ return TFMultipleChoiceModelOutput(
1579
+ loss=loss,
1580
+ logits=reshaped_logits,
1581
+ hidden_states=outputs.hidden_states,
1582
+ attentions=outputs.attentions,
1583
+ )
1584
+
1585
+ def build(self, input_shape=None):
1586
+ if self.built:
1587
+ return
1588
+ self.built = True
1589
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1590
+ with tf.name_scope(self.roberta_prelayernorm.name):
1591
+ self.roberta_prelayernorm.build(None)
1592
+ if getattr(self, "classifier", None) is not None:
1593
+ with tf.name_scope(self.classifier.name):
1594
+ self.classifier.build([None, None, self.config.hidden_size])
1595
+
1596
+
1597
+ @add_start_docstrings(
1598
+ """
1599
+ RoBERTa-PreLayerNorm Model with a token classification head on top (a linear layer on top of the hidden-states
1600
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1601
+ """,
1602
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1603
+ )
1604
+ class TFRobertaPreLayerNormForTokenClassification(TFRobertaPreLayerNormPreTrainedModel, TFTokenClassificationLoss):
1605
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1606
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1607
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1608
+
1609
+ def __init__(self, config, *inputs, **kwargs):
1610
+ super().__init__(config, *inputs, **kwargs)
1611
+ self.num_labels = config.num_labels
1612
+
1613
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1614
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1615
+ )
1616
+ classifier_dropout = (
1617
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1618
+ )
1619
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1620
+ self.classifier = keras.layers.Dense(
1621
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1622
+ )
1623
+ self.config = config
1624
+
1625
+ @unpack_inputs
1626
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1627
+ @add_code_sample_docstrings(
1628
+ checkpoint=_CHECKPOINT_FOR_DOC,
1629
+ output_type=TFTokenClassifierOutput,
1630
+ config_class=_CONFIG_FOR_DOC,
1631
+ )
1632
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForTokenClassification.call with roberta->roberta_prelayernorm
1633
+ def call(
1634
+ self,
1635
+ input_ids: TFModelInputType | None = None,
1636
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1637
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1638
+ position_ids: np.ndarray | tf.Tensor | None = None,
1639
+ head_mask: np.ndarray | tf.Tensor | None = None,
1640
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1641
+ output_attentions: Optional[bool] = None,
1642
+ output_hidden_states: Optional[bool] = None,
1643
+ return_dict: Optional[bool] = None,
1644
+ labels: np.ndarray | tf.Tensor | None = None,
1645
+ training: Optional[bool] = False,
1646
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1647
+ r"""
1648
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1649
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1650
+ """
1651
+ outputs = self.roberta_prelayernorm(
1652
+ input_ids,
1653
+ attention_mask=attention_mask,
1654
+ token_type_ids=token_type_ids,
1655
+ position_ids=position_ids,
1656
+ head_mask=head_mask,
1657
+ inputs_embeds=inputs_embeds,
1658
+ output_attentions=output_attentions,
1659
+ output_hidden_states=output_hidden_states,
1660
+ return_dict=return_dict,
1661
+ training=training,
1662
+ )
1663
+ sequence_output = outputs[0]
1664
+
1665
+ sequence_output = self.dropout(sequence_output, training=training)
1666
+ logits = self.classifier(sequence_output)
1667
+
1668
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1669
+
1670
+ if not return_dict:
1671
+ output = (logits,) + outputs[2:]
1672
+ return ((loss,) + output) if loss is not None else output
1673
+
1674
+ return TFTokenClassifierOutput(
1675
+ loss=loss,
1676
+ logits=logits,
1677
+ hidden_states=outputs.hidden_states,
1678
+ attentions=outputs.attentions,
1679
+ )
1680
+
1681
+ def build(self, input_shape=None):
1682
+ if self.built:
1683
+ return
1684
+ self.built = True
1685
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1686
+ with tf.name_scope(self.roberta_prelayernorm.name):
1687
+ self.roberta_prelayernorm.build(None)
1688
+ if getattr(self, "classifier", None) is not None:
1689
+ with tf.name_scope(self.classifier.name):
1690
+ self.classifier.build([None, None, self.config.hidden_size])
1691
+
1692
+
1693
+ @add_start_docstrings(
1694
+ """
1695
+ RoBERTa-PreLayerNorm Model with a span classification head on top for extractive question-answering tasks like
1696
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1697
+ """,
1698
+ ROBERTA_PRELAYERNORM_START_DOCSTRING,
1699
+ )
1700
+ class TFRobertaPreLayerNormForQuestionAnswering(TFRobertaPreLayerNormPreTrainedModel, TFQuestionAnsweringLoss):
1701
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1702
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
1703
+
1704
+ def __init__(self, config, *inputs, **kwargs):
1705
+ super().__init__(config, *inputs, **kwargs)
1706
+ self.num_labels = config.num_labels
1707
+
1708
+ self.roberta_prelayernorm = TFRobertaPreLayerNormMainLayer(
1709
+ config, add_pooling_layer=False, name="roberta_prelayernorm"
1710
+ )
1711
+ self.qa_outputs = keras.layers.Dense(
1712
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1713
+ )
1714
+ self.config = config
1715
+
1716
+ @unpack_inputs
1717
+ @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1718
+ @add_code_sample_docstrings(
1719
+ checkpoint=_CHECKPOINT_FOR_DOC,
1720
+ output_type=TFQuestionAnsweringModelOutput,
1721
+ config_class=_CONFIG_FOR_DOC,
1722
+ )
1723
+ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForQuestionAnswering.call with roberta->roberta_prelayernorm
1724
+ def call(
1725
+ self,
1726
+ input_ids: TFModelInputType | None = None,
1727
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1728
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1729
+ position_ids: np.ndarray | tf.Tensor | None = None,
1730
+ head_mask: np.ndarray | tf.Tensor | None = None,
1731
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1732
+ output_attentions: Optional[bool] = None,
1733
+ output_hidden_states: Optional[bool] = None,
1734
+ return_dict: Optional[bool] = None,
1735
+ start_positions: np.ndarray | tf.Tensor | None = None,
1736
+ end_positions: np.ndarray | tf.Tensor | None = None,
1737
+ training: Optional[bool] = False,
1738
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1739
+ r"""
1740
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1741
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1742
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1743
+ are not taken into account for computing the loss.
1744
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1745
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1746
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1747
+ are not taken into account for computing the loss.
1748
+ """
1749
+ outputs = self.roberta_prelayernorm(
1750
+ input_ids,
1751
+ attention_mask=attention_mask,
1752
+ token_type_ids=token_type_ids,
1753
+ position_ids=position_ids,
1754
+ head_mask=head_mask,
1755
+ inputs_embeds=inputs_embeds,
1756
+ output_attentions=output_attentions,
1757
+ output_hidden_states=output_hidden_states,
1758
+ return_dict=return_dict,
1759
+ training=training,
1760
+ )
1761
+ sequence_output = outputs[0]
1762
+
1763
+ logits = self.qa_outputs(sequence_output)
1764
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1765
+ start_logits = tf.squeeze(start_logits, axis=-1)
1766
+ end_logits = tf.squeeze(end_logits, axis=-1)
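+ # Illustrative note (not in the original source): qa_outputs projects each token to two
+ # scores; after the split and squeeze, start_logits and end_logits each have shape
+ # (batch_size, sequence_length), giving per-token span-start and span-end scores.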
1767
+
1768
+ loss = None
1769
+ if start_positions is not None and end_positions is not None:
1770
+ labels = {"start_position": start_positions}
1771
+ labels["end_position"] = end_positions
1772
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1773
+
1774
+ if not return_dict:
1775
+ output = (start_logits, end_logits) + outputs[2:]
1776
+ return ((loss,) + output) if loss is not None else output
1777
+
1778
+ return TFQuestionAnsweringModelOutput(
1779
+ loss=loss,
1780
+ start_logits=start_logits,
1781
+ end_logits=end_logits,
1782
+ hidden_states=outputs.hidden_states,
1783
+ attentions=outputs.attentions,
1784
+ )
1785
+
1786
+ def build(self, input_shape=None):
1787
+ if self.built:
1788
+ return
1789
+ self.built = True
1790
+ if getattr(self, "roberta_prelayernorm", None) is not None:
1791
+ with tf.name_scope(self.roberta_prelayernorm.name):
1792
+ self.roberta_prelayernorm.build(None)
1793
+ if getattr(self, "qa_outputs", None) is not None:
1794
+ with tf.name_scope(self.qa_outputs.name):
1795
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1796
+
1797
+
1798
+ __all__ = [
1799
+ "TFRobertaPreLayerNormForCausalLM",
1800
+ "TFRobertaPreLayerNormForMaskedLM",
1801
+ "TFRobertaPreLayerNormForMultipleChoice",
1802
+ "TFRobertaPreLayerNormForQuestionAnswering",
1803
+ "TFRobertaPreLayerNormForSequenceClassification",
1804
+ "TFRobertaPreLayerNormForTokenClassification",
1805
+ "TFRobertaPreLayerNormMainLayer",
1806
+ "TFRobertaPreLayerNormModel",
1807
+ "TFRobertaPreLayerNormPreTrainedModel",
1808
+ ]