ZTWHHH committed on
Commit
c927fb1
·
verified ·
1 Parent(s): bc1bb63

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_internvl/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/__init__.cpython-310.pyc +0 -0
  2. evalkit_internvl/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  3. evalkit_internvl/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/image_processing_conditional_detr.cpython-310.pyc +0 -0
  4. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__init__.py +135 -0
  5. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/__init__.cpython-310.pyc +0 -0
  6. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_text.cpython-310.pyc +0 -0
  7. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_vision.cpython-310.pyc +0 -0
  8. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  9. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_audio.cpython-310.pyc +0 -0
  10. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_vision.cpython-310.pyc +0 -0
  11. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_tf_data2vec_vision.cpython-310.pyc +0 -0
  12. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_audio.py +290 -0
  13. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_text.py +154 -0
  14. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_vision.py +196 -0
  15. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py +286 -0
  16. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_audio.py +1509 -0
  17. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_text.py +1560 -0
  18. evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/modeling_tf_data2vec_vision.py +1725 -0
  19. evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/__init__.py +86 -0
  20. evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/configuration_pix2struct.cpython-310.pyc +0 -0
  21. evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/configuration_pix2struct.py +390 -0
  22. evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py +155 -0
  23. evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/image_processing_pix2struct.py +460 -0
  24. evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/modeling_pix2struct.py +1805 -0
  25. evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/processing_pix2struct.py +163 -0
  26. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__init__.py +78 -0
  27. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/configuration_vivit.cpython-310.pyc +0 -0
  28. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/convert_vivit_flax_to_pytorch.cpython-310.pyc +0 -0
  29. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/image_processing_vivit.cpython-310.pyc +0 -0
  30. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/modeling_vivit.cpython-310.pyc +0 -0
  31. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/configuration_vivit.py +123 -0
  32. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/convert_vivit_flax_to_pytorch.py +230 -0
  33. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/image_processing_vivit.py +400 -0
  34. evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/modeling_vivit.py +745 -0
  35. evalkit_tf437/lib/python3.10/site-packages/filelock/__init__.py +70 -0
  36. evalkit_tf437/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc +0 -0
  37. evalkit_tf437/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc +0 -0
  38. evalkit_tf437/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc +0 -0
  39. evalkit_tf437/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc +0 -0
  40. evalkit_tf437/lib/python3.10/site-packages/filelock/_api.py +403 -0
  41. evalkit_tf437/lib/python3.10/site-packages/filelock/_error.py +30 -0
  42. evalkit_tf437/lib/python3.10/site-packages/filelock/_soft.py +47 -0
  43. evalkit_tf437/lib/python3.10/site-packages/filelock/_unix.py +68 -0
  44. evalkit_tf437/lib/python3.10/site-packages/filelock/_util.py +52 -0
  45. evalkit_tf437/lib/python3.10/site-packages/filelock/_windows.py +65 -0
  46. evalkit_tf437/lib/python3.10/site-packages/filelock/asyncio.py +342 -0
  47. evalkit_tf437/lib/python3.10/site-packages/filelock/py.typed +0 -0
  48. evalkit_tf437/lib/python3.10/site-packages/filelock/version.py +16 -0
  49. evalkit_tf437/lib/python3.10/site-packages/pip/__pycache__/__init__.cpython-310.pyc +0 -0
  50. evalkit_tf437/lib/python3.10/site-packages/pip/__pycache__/__main__.cpython-310.pyc +0 -0
evalkit_internvl/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (9.32 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/image_processing_conditional_detr.cpython-310.pyc ADDED
Binary file (56.3 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__init__.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Maps submodule name -> list of public names it exports. _LazyModule uses this
# so that heavy modeling submodules are only imported on first attribute access.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

# Register the PyTorch modeling submodules only when torch is installed;
# without torch the configurations above remain importable on their own.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

# TensorFlow variants exist only for the vision model.
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analyzers / type checkers get real eager imports mirroring
    # _import_structure exactly; at runtime this branch never executes.
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy: submodules in
    # _import_structure are imported only when first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.47 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_text.cpython-310.pyc ADDED
Binary file (6.76 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/configuration_data2vec_vision.cpython-310.pyc ADDED
Binary file (8.19 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (5.13 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_audio.cpython-310.pyc ADDED
Binary file (40.7 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_data2vec_vision.cpython-310.pyc ADDED
Binary file (38.8 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/__pycache__/modeling_tf_data2vec_vision.cpython-310.pyc ADDED
Binary file (52.3 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_audio.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data2VecAudio configuration"""

import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Data2VecAudioModel`]. It is used to instantiate
    an Data2VecAudio model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Data2VecAudio
    [facebook/data2vec-audio-base-960h](https://huggingface.co/facebook/data2vec-audio-base-960h) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the Data2VecAudio model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`Data2VecAudioModel`] or [`TFData2VecAudioModel`]. Vocabulary size
            of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the
            forward method of [`Data2VecAudioModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`Data2VecAudioForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more
            details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 5):
            Number of 1D convolutional positional embeddings layers.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of 1D convolutional positional embeddings layer.
        conv_pos_kernel_size (`int`, *optional*, defaults to 19):
            Kernel size of each 1D convolutional positional embeddings layer.
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
            masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
            actual percentage of masked vectors.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,
            irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
            mask_time_min_masks''
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over
            the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
            span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
            may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
            True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespectively of `mask_feature_prob`. Only relevant if
            ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`Data2VecAudioForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`Data2VecAudioForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`Data2VecAudioForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
        tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
            A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
            module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
        tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
            *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
        tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
            A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
            *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
        xvector_output_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the *XVector* embedding vectors.
        add_adapter (`bool`, *optional*, defaults to `False`):
            Whether a convolutional network should be stacked on top of the Data2VecAudio Encoder. Can be very useful
            for warm-starting Data2VecAudio for SpeechEncoderDecoder models.
        adapter_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        adapter_stride (`int`, *optional*, defaults to 2):
            Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        num_adapter_layers (`int`, *optional*, defaults to 3):
            Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
            True`.
        output_hidden_size (`int`, *optional*):
            Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant
            if `add_adapter is True`.

    Example:

    ```python
    >>> from transformers import Data2VecAudioConfig, Data2VecAudioModel

    >>> # Initializing a Data2VecAudio facebook/data2vec-audio-base-960h style configuration
    >>> configuration = Data2VecAudioConfig()

    >>> # Initializing a model (with random weights) from the facebook/data2vec-audio-base-960h style configuration
    >>> model = Data2VecAudioModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        # Store conv specs as lists so they serialize to JSON consistently.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs describe the same stack of layers, so their
        # lengths must agree; fail fast with a descriptive error otherwise.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Overall downsampling factor of the feature encoder: the product of
        # all conv strides, i.e. how many input samples map to one logit frame.
        return math.prod(self.conv_stride)
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_text.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Data2VecText configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27
+ "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
28
+ }
29
+
30
+
31
class Data2VecTextConfig(PretrainedConfig):
    r"""
    Configuration class for [`Data2VecTextModel`]. It is used to instantiate a Data2VecText model according to the
    specified arguments, defining the model architecture. Instantiating a configuration with no arguments yields a
    configuration similar to that of the Data2VecText
    [facebook/data2vec-text-base](https://huggingface.co/facebook/data2vec-text-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the model, i.e. the number of distinct tokens representable by `inputs_ids`.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            Non-linear activation function in the encoder and pooler. If a string, `"gelu"`, `"relu"`, `"silu"` and
            `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            Dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            Dropout ratio applied to the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            Maximum sequence length the model might ever be used with. Typically set large just in case
            (e.g., 512, 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            Vocabulary size of the `token_type_ids`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            Epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding: `"absolute"`, `"relative_key"` or `"relative_key_query"`. For
            `"relative_key"` see [Shaw et al.](https://arxiv.org/abs/1803.02155); for `"relative_key_query"` see
            *Method 4* in [Huang et al.](https://arxiv.org/abs/2009.13658).
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by all models). Only relevant
            if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            Dropout ratio for the classification head.

    Examples:

    ```python
    >>> from transformers import Data2VecTextConfig, Data2VecTextModel

    >>> # Initializing a Data2VecText facebook/data2vec-text-base style configuration
    >>> configuration = Data2VecTextConfig()

    >>> # Initializing a model (with random weights) from the facebook/data2vec-text-base style configuration
    >>> model = Data2VecTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are handled by the base class; everything else is stored verbatim.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Transformer geometry.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Regularization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout = classifier_dropout
        # Embeddings and initialization.
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Decoder-only caching switch.
        self.use_cache = use_cache
140
+
141
+
142
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Map each ONNX input name to its dynamic axes (multiple-choice adds a `choice` axis)."""
        if self.task == "multiple-choice":
            axes = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axes = {0: "batch", 1: "sequence"}
        return OrderedDict((("input_ids", axes), ("attention_mask", axes)))
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/configuration_data2vec_vision.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright Meta Platforms and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Data2VecVision model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from packaging import version
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
29
+ "facebook/data2vec-vision-base-ft": (
30
+ "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
31
+ ),
32
+ }
33
+
34
+
35
class Data2VecVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Data2VecVisionModel`]. It is used to instantiate
    an Data2VecVision model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Data2VecVision
    [facebook/data2vec-vision-base](https://huggingface.co/facebook/data2vec-vision-base) architecture.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to use BERT-style absolute position embeddings.
        use_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use T5-style relative position embeddings in the self-attention layers.
        use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
        layer_scale_init_value (`float`, *optional*, defaults to 0.1):
            Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        use_mean_pooling (`bool`, *optional*, defaults to `True`):
            Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
            CLS token, before applying the classification head.
        out_indices (`List[int]`, *optional*, defaults to `[3, 5, 7, 11]`):
            Indices of the feature maps to use for semantic segmentation.
        pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
            Pooling scales used in Pooling Pyramid Module applied on the last feature map.
        use_auxiliary_head (`bool`, *optional*, defaults to `True`):
            Whether to use an auxiliary head during training.
        auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
            Weight of the cross-entropy loss of the auxiliary head.
        auxiliary_channels (`int`, *optional*, defaults to 256):
            Number of channels to use in the auxiliary head.
        auxiliary_num_convs (`int`, *optional*, defaults to 1):
            Number of convolutional layers to use in the auxiliary head.
        auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the output of the auxiliary head with the input before the classification layer.
        semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
            The index that is ignored by the loss function of the semantic segmentation model.

    Example:

    ```python
    >>> from transformers import Data2VecVisionConfig, Data2VecVisionModel

    >>> # Initializing a Data2VecVision data2vec_vision-base-patch16-224-in22k style configuration
    >>> configuration = Data2VecVisionConfig()

    >>> # Initializing a model (with random weights) from the data2vec_vision-base-patch16-224-in22k style configuration
    >>> model = Data2VecVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=None,
        pool_scales=None,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        # `None` sentinels instead of mutable list defaults: a shared default list would be
        # aliased across every config instance, so mutating one config would mutate them all.
        self.out_indices = [3, 5, 7, 11] if out_indices is None else out_indices
        self.pool_scales = [1, 2, 3, 6] if pool_scales is None else pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
180
+
181
+
182
class Data2VecVisionOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2VecVision (mirrors the ViT ONNX configuration)."""

    # Minimum torch version required for a correct ONNX export of this architecture.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Single `pixel_values` input; every axis is dynamic."""
        axes = {index: label for index, label in enumerate(("batch", "num_channels", "height", "width"))}
        return OrderedDict([("pixel_values", axes)])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model against the PyTorch one."""
        return 1e-4
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Wav2Vec2 checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import os
20
+ from functools import reduce
21
+
22
+ import fairseq
23
+ import torch
24
+ from datasets import load_dataset
25
+
26
+ from transformers import Wav2Vec2Processor, logging
27
+ from transformers.models.data2vec.configuration_data2vec_audio import Data2VecAudioConfig
28
+
29
+ # Copied from https://github.com/pytorch/fairseq/blob/main/examples/data2vec/models/data2vec_audio.py
30
+ from transformers.models.data2vec.data2vec_audio import Data2VecAudioModel as Dummy # noqa: F401
31
+ from transformers.models.data2vec.modeling_data2vec_audio import Data2VecAudioForCTC, Data2VecAudioModel
32
+
33
+
34
+ logging.set_verbosity_info()
35
+ logger = logging.get_logger(__name__)
36
+
37
+ MAPPING = {
38
+ "post_extract_proj": "feature_projection.projection",
39
+ "models.0.layer_norm": "feature_projection.layer_norm",
40
+ "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
41
+ "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
42
+ "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
43
+ "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
44
+ "self_attn_layer_norm": "encoder.layers.*.layer_norm",
45
+ "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
46
+ "fc2": "encoder.layers.*.feed_forward.output_dense",
47
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
48
+ "encoder.layer_norm": "encoder.layer_norm",
49
+ "w2v_model.layer_norm": "feature_projection.layer_norm",
50
+ "w2v_encoder.proj": "lm_head",
51
+ "mask_emb": "masked_spec_embed",
52
+ }
53
+ TOP_LEVEL_KEYS = [
54
+ "lm_head",
55
+ ]
56
+
57
+
58
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the parameter of `hf_pointer` addressed by the dotted path `key`.

    `weight_type` selects which tensor of the resolved module is overwritten ("weight",
    "weight_g", "weight_v", "bias"); any other value writes to the object itself.
    Raises ValueError when the target and source shapes disagree.
    """
    # Walk the dotted attribute path down to the target module/parameter.
    target = hf_pointer
    for part in key.split("."):
        target = getattr(target, part)

    hf_shape = getattr(target, weight_type).shape if weight_type is not None else target.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    # All four named cases write the same way, so dispatch through getattr.
    if weight_type in ("weight", "weight_g", "weight_v", "bias"):
        getattr(target, weight_type).data = value
    else:
        target.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
85
+
86
+
87
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor of a fairseq data2vec-audio state dict into the HF model.

    Weights are routed three ways: conv feature-extractor layers, positional conv
    layers, and everything else via the module-level MAPPING table. Names that match
    nothing are collected and reported in a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # A "headless" target is the bare Data2VecAudioModel; otherwise the backbone
    # lives under the `data2vec_audio` attribute of the task model (e.g. ForCTC).
    if not is_headless:
        feature_extractor = hf_model.data2vec_audio.feature_extractor
        pos_conv_embedding = hf_model.data2vec_audio.encoder.pos_conv_embed

    else:
        feature_extractor = hf_model.feature_extractor
        pos_conv_embedding = hf_model.encoder.pos_conv_embed

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # Feature-extractor convolutions have their own positional naming scheme.
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
            )
            is_used = True
        elif "pos_conv" in name:
            # Positional convolutional embedding layers.
            load_pos_conv_layer(
                name,
                value,
                pos_conv_embedding,
                unused_weights,
            )
            is_used = True
        else:
            # Fall back to the substring-based MAPPING table.
            for key, mapped_key in MAPPING.items():
                if not is_headless:
                    # Prefix with the backbone attribute unless the target is top-level (e.g. lm_head).
                    mapped_key = "data2vec_audio." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index extracted from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    # Order matters: "weight_g"/"weight_v" must be tested before plain "weight".
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
143
+
144
+
145
def access_by_string(module, path):
    """Resolve a dotted attribute `path` (e.g. "encoder.layers.0.weight") starting from `module`."""
    target = module
    for attribute in path.split("."):
        target = getattr(target, attribute)
    return target
148
+
149
+
150
def set_weights(full_name, module, fsq_value, hf_weight_path):
    """Overwrite the tensor at dotted path `hf_weight_path` inside `module` with `fsq_value`.

    Raises ValueError when the fairseq tensor shape does not match the HF parameter shape.
    """
    param = access_by_string(module, hf_weight_path)
    hf_value = param.data

    if fsq_value.shape != hf_value.shape:
        raise ValueError(f"{full_name} has size {fsq_value.shape}, but {hf_value.shape} was found.")

    param.data = fsq_value
    logger.info(f"{full_name} was correctly initialized from {hf_weight_path}.")
158
+
159
+
160
def load_conv_layer(full_name, value, feature_extractor, unused_weights):
    """Route one fairseq `conv_layers.*` tensor into the HF feature extractor.

    type_id 0 maps to the conv weight, type_id 2 to its layer norm; anything else is
    recorded in `unused_weights`.
    """
    parts = full_name.split("conv_layers.")[-1].split(".")
    layer_id = int(parts[0])
    type_id = int(parts[1])
    weight_type = parts[-1]

    layer_type = {0: "conv", 2: "layer_norm"}.get(type_id)
    if layer_type is None:
        unused_weights.append(full_name)
        return

    set_weights(full_name, feature_extractor, value, f"conv_layers.{layer_id}.{layer_type}.{weight_type}")
176
+
177
+
178
def load_pos_conv_layer(full_name, value, pos_conv_embeddings, unused_weights):
    """Route one fairseq `pos_conv.*` tensor into the HF positional conv embedding.

    Only type_id 0 (the conv itself) is mapped; other ids are recorded in `unused_weights`.
    """
    parts = full_name.split("pos_conv.")[-1].split(".")
    layer_id = int(parts[0])

    # Guard clause: anything other than the conv weight/bias is not mapped.
    if int(parts[1]) != 0:
        unused_weights.append(full_name)
        return

    weight_type = parts[-1]
    set_weights(full_name, pos_conv_embeddings, value, f"layers.{layer_id}.conv.{weight_type}")
192
+
193
+
194
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.

    Loads a fairseq data2vec-audio checkpoint, copies its weights into a HF model,
    sanity-checks both models on a small LibriSpeech sample, and saves the result.
    NOTE(review): `dict_path` is accepted but never used in this function body.
    """
    # Target config: from a user-supplied config.json, or library defaults.
    if config_path is not None:
        config = Data2VecAudioConfig.from_pretrained(config_path)
    else:
        config = Data2VecAudioConfig()

    if not is_finetuned:
        # Modify final_proj layer name
        hf_wav2vec = Data2VecAudioModel(config)
        data2vec_checkpoint_dir = os.path.dirname(checkpoint_path)

        # NOTE(review): torch.load unpickles arbitrary code — only run on trusted checkpoints.
        state_dict = torch.load(checkpoint_path)
        # fairseq stores the final projection inside a Sequential ("final_proj.0.*");
        # flatten the names and re-save so fairseq's loader accepts the checkpoint below.
        state_dict["model"]["final_proj.weight"] = state_dict["model"].pop("final_proj.0.weight")
        state_dict["model"]["final_proj.bias"] = state_dict["model"].pop("final_proj.0.bias")
        converted_ckpt = os.path.join(data2vec_checkpoint_dir, "converted.pt")
        torch.save(state_dict, converted_ckpt)
    else:
        hf_wav2vec = Data2VecAudioForCTC(config)
        converted_ckpt = checkpoint_path

    def load_data2vec(path):
        # Load the fairseq ensemble and return its first (only) member in eval mode.
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([path])
        return model[0].eval()

    model = load_data2vec(converted_ckpt)

    # The fairseq model is "headless" exactly when we are converting a pretrained (non-finetuned) checkpoint.
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-lv60")

    # Tiny LibriSpeech sample used purely to verify the conversion numerically.
    ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
    input_audio = [x["array"] for x in ds[:4]["audio"]]

    inputs = processor(input_audio, return_tensors="pt", padding=True)

    input_values = inputs.input_values
    attention_mask = inputs.attention_mask
    # input_values = inputs.input_values[:, :-1]
    # attention_mask = inputs.attention_mask[:, :-1]

    hf_wav2vec.eval()
    model.eval()
    if is_finetuned:
        # fairseq expects a *padding* mask (1 = padded), i.e. the inverse of the HF attention mask.
        their_output = model(source=input_values, padding_mask=(1 - attention_mask), mask=False, features_only=True)[
            "encoder_out"
        ].transpose(0, 1)
        our_output = hf_wav2vec(input_values, attention_mask=attention_mask)["logits"]

        pred_ids = torch.argmax(our_output, dim=-1)
        output_string = processor.batch_decode(pred_ids)

        print(f"Expected Output: {ds[:4]['text']}, Pred: {output_string}")
    else:
        their_output = model(source=input_values, padding_mask=(1 - attention_mask), mask=False, features_only=True)[
            "layer_results"
        ][-1][0].transpose(0, 1)
        our_output = hf_wav2vec(input_values, attention_mask=attention_mask)["last_hidden_state"]

    # Both branches must agree element-wise within tolerance, otherwise abort.
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)

    # Fine-tuned models ship the full processor (incl. tokenizer); pretrained ones only the feature extractor.
    if is_finetuned:
        processor.save_pretrained(pytorch_dump_folder_path)
    else:
        processor.feature_extractor.save_pretrained(pytorch_dump_folder_path)
272
+
273
+
274
if __name__ == "__main__":
    # CLI entry point: declare all arguments in order, then run the conversion.
    parser = argparse.ArgumentParser()
    for flag, options in (
        ("--pytorch_dump_folder_path", {"default": None, "type": str, "help": "Path to the output PyTorch model."}),
        ("--checkpoint_path", {"default": None, "type": str, "help": "Path to fairseq checkpoint"}),
        ("--dict_path", {"default": None, "type": str, "help": "Path to dict of fine-tuned model"}),
        ("--config_path", {"default": None, "type": str, "help": "Path to hf config.json of model to convert"}),
        ("--not_finetuned", {"action": "store_true", "help": "Whether the model to convert is a fine-tuned model or not"}),
    ):
        parser.add_argument(flag, **options)
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_audio.py ADDED
@@ -0,0 +1,1509 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Data2VecAudio model."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
29
+ from ...modeling_outputs import (
30
+ BaseModelOutput,
31
+ CausalLMOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ Wav2Vec2BaseModelOutput,
35
+ XVectorOutput,
36
+ )
37
+ from ...modeling_utils import PreTrainedModel
38
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
39
+ from .configuration_data2vec_audio import Data2VecAudioConfig
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+
45
+ _HIDDEN_STATES_START_POSITION = 2
46
+
47
+ # General docstring
48
+ _CONFIG_FOR_DOC = "Data2VecAudioConfig"
49
+
50
+ # Base docstring
51
+ _CHECKPOINT_FOR_DOC = "facebook/data2vec-audio-base-960h"
52
+ _EXPECTED_OUTPUT_SHAPE = [1, 292, 768]
53
+
54
+ # CTC docstring
55
+ _CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
56
+ _CTC_EXPECTED_LOSS = 66.95
57
+
58
+
59
+ DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = [
60
+ "facebook/data2vec-audio-base",
61
+ "facebook/data2vec-audio-base-10m",
62
+ "facebook/data2vec-audio-base-100h",
63
+ "facebook/data2vec-audio-base-960h",
64
+ # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
65
+ ]
66
+
67
+
68
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: Optional[torch.LongTensor] = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
            the first element is the batch size and the second element is the length of the axis to span.
        mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
            independently generated mask spans of length `mask_length` is computed by
            `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
            actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
            each batch dimension.

    Returns:
        A boolean `np.ndarray` of shape `(batch_size, sequence_length)` where `True` marks masked positions.
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}`"
        )

    # epsilon is used for probabilistic rounding
    # NOTE: uses numpy's global RNG; the order of RNG calls in this function matters for reproducibility.
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch
    input_lengths = (
        attention_mask.sum(-1).detach().tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick first sampled index that will serve as a dummy index to pad vector
        # to ensure same dimension for all batches due to probabilistic rounding
        # Picking first sample just pads those vectors twice.
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller then
            # `sequence_length` in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask
186
+
187
+
188
class Data2VecAudioConvLayer(nn.Module):
    """One stage of the waveform feature extractor: Conv1d -> LayerNorm -> activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # The first layer consumes the single-channel raw waveform; every later
        # layer consumes the previous layer's output width.
        self.in_conv_dim = 1 if layer_id <= 0 else config.conv_dim[layer_id - 1]
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        convolved = self.conv(hidden_states)
        # LayerNorm normalizes the last dim, so swap (channel, time) around the
        # normalization and swap back afterwards.
        normalized = self.layer_norm(convolved.transpose(-2, -1)).transpose(-2, -1)
        return self.activation(normalized)
213
+
214
+
215
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->Data2VecAudio
class Data2VecAudioPadLayer(nn.Module):
    """Trims the trailing frame that "same" padding adds when the conv kernel is even."""

    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        # An even kernel with symmetric padding yields one surplus output frame.
        self.num_pad_remove = 0 if num_conv_pos_embeddings % 2 else 1

    def forward(self, hidden_states):
        if self.num_pad_remove:
            return hidden_states[:, :, : -self.num_pad_remove]
        return hidden_states
225
+
226
+
227
class Data2VecAudioPositionalConvLayer(nn.Module):
    """Grouped-conv block of the positional embedding stack: conv -> trim pad -> norm -> activation."""

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.conv_pos_kernel_size,
            padding=config.conv_pos_kernel_size // 2,
            groups=config.num_conv_pos_embedding_groups,
        )

        self.padding = Data2VecAudioPadLayer(config.conv_pos_kernel_size)
        self.activation = ACT2FN[config.feat_extract_activation]
        # no learnable parameters
        self.layer_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False)

    def forward(self, hidden_states):
        hidden_states = self.padding(self.conv(hidden_states))
        # LayerNorm needs channels last; transpose around it and back.
        hidden_states = self.layer_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
        return self.activation(hidden_states)
252
+
253
+
254
class Data2VecAudioPositionalConvEmbedding(nn.Module):
    """Stack of convolutional layers producing relative positional embeddings."""

    def __init__(self, config):
        super().__init__()
        self.layers = nn.ModuleList(
            Data2VecAudioPositionalConvLayer(config) for _ in range(config.num_conv_pos_embeddings)
        )

    def forward(self, hidden_states):
        # Conv layers operate on (batch, channel, time).
        embeddings = hidden_states.transpose(1, 2)
        for conv_block in self.layers:
            embeddings = conv_block(embeddings)
        # Back to (batch, time, channel) for the transformer.
        return embeddings.transpose(1, 2)
267
+
268
+
269
class Data2VecAudioFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()
        self.conv_layers = nn.ModuleList(
            [Data2VecAudioConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
        )
        # Set externally (via PreTrainedModel machinery) to enable activation checkpointing.
        self.gradient_checkpointing = False
        # Tracks whether the encoder is trainable; flipped off by `_freeze_parameters`.
        self._requires_grad = True

    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder._freeze_parameters
    def _freeze_parameters(self):
        # Disable gradients for every conv parameter and remember the frozen state.
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder.forward
    def forward(self, input_values):
        # Add a channel dim: (batch, time) -> (batch, 1, time) for Conv1d.
        hidden_states = input_values[:, None]

        # make sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        for conv_layer in self.conv_layers:
            if self._requires_grad and self.gradient_checkpointing and self.training:
                # Re-run the conv layer in the backward pass instead of storing activations.
                hidden_states = self._gradient_checkpointing_func(
                    conv_layer.__call__,
                    hidden_states,
                )
            else:
                hidden_states = conv_layer(hidden_states)

        return hidden_states
304
+
305
+
306
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->Data2VecAudio
class Data2VecAudioFeatureProjection(nn.Module):
    """Normalizes the extracted conv features and projects them to the model hidden size."""

    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # The normalized (pre-projection) features are returned too, since the
        # quantization path downstream consumes them.
        normed_features = self.layer_norm(hidden_states)
        projected = self.dropout(self.projection(normed_features))
        return projected, normed_features
320
+
321
+
322
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Data2VecAudio
class Data2VecAudioAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[Data2VecAudioConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        # Per-head dimension; embed_dim must divide evenly across heads.
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Standard 1/sqrt(d_head) scaling applied to the query projection.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed) -> (bsz, num_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel

        Returns `(attn_output, attn_weights_or_None, past_key_value_or_None)`.
        """

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # Fold heads into the batch dim so attention is a single bmm per head.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # Additive mask (large negative values at masked positions), applied pre-softmax.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
479
+
480
+
481
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->Data2VecAudio
class Data2VecAudioFeedForward(nn.Module):
    """Position-wise feed-forward block: expand to intermediate size, activate, contract back."""

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be the name of an activation (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        expanded = self.intermediate_dropout(self.intermediate_act_fn(self.intermediate_dense(hidden_states)))
        return self.output_dropout(self.output_dense(expanded))
504
+
505
+
506
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->Data2VecAudio
class Data2VecAudioEncoderLayer(nn.Module):
    """Post-norm transformer encoder layer: self-attention and feed-forward, each wrapped in a residual."""

    def __init__(self, config):
        super().__init__()
        self.attention = Data2VecAudioAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = Data2VecAudioFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        attn_out, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        # Residual around (dropped-out) self-attention, then post-norm.
        hidden_states = self.layer_norm(residual + self.dropout(attn_out))
        # Residual around the feed-forward block, then final post-norm.
        hidden_states = self.final_layer_norm(hidden_states + self.feed_forward(hidden_states))

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
539
+
540
+
541
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->Data2VecAudio
class Data2VecAudioEncoder(nn.Module):
    """Transformer encoder: convolutional positional embeddings followed by a stack of encoder layers."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = Data2VecAudioPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([Data2VecAudioEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Set externally (via PreTrainedModel machinery) to enable activation checkpointing.
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        """Runs the encoder stack; returns a `BaseModelOutput` (or a tuple if `return_dict=False`)."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            # NOTE: this zeroing is done in-place on `hidden_states`.
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0

            # extend attention_mask
            # Convert the (batch, seq) 0/1 mask into an additive (batch, 1, seq, seq) mask
            # with large negative values at padded positions, as expected by the attention layers.
            attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
            attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        layer.__call__,
                        hidden_states,
                        attention_mask,
                        output_attentions,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                # A skipped layer contributes `None` attentions (its output is discarded under zero3 too).
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
621
+
622
+
623
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->Data2VecAudio
class Data2VecAudioAdapter(nn.Module):
    """Optional adapter: projects encoder output if needed, then down-samples it in time with strided convs."""

    def __init__(self, config):
        super().__init__()

        # feature dim might need to be down-projected
        if config.output_hidden_size != config.hidden_size:
            self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
            self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
        else:
            self.proj = self.proj_layer_norm = None

        self.layers = nn.ModuleList(Data2VecAudioAdapterLayer(config) for _ in range(config.num_adapter_layers))
        self.layerdrop = config.layerdrop

    def forward(self, hidden_states):
        # down project hidden_states if necessary
        if self.proj is not None and self.proj_layer_norm is not None:
            hidden_states = self.proj_layer_norm(self.proj(hidden_states))

        hidden_states = hidden_states.transpose(1, 2)

        for adapter_layer in self.layers:
            # LayerDrop: the RNG draw happens every iteration (even in eval) so
            # numpy's RNG stream stays identical to the reference implementation.
            drop_sample = np.random.random()
            if (not self.training) or drop_sample > self.layerdrop:
                hidden_states = adapter_layer(hidden_states)

        return hidden_states.transpose(1, 2)
653
+
654
+
655
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->Data2VecAudio
class Data2VecAudioAdapterLayer(nn.Module):
    """Strided conv doubling the channel count, followed by a GLU that halves it back."""

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.output_hidden_size,
            2 * config.output_hidden_size,
            config.adapter_kernel_size,
            stride=config.adapter_stride,
            padding=1,
        )

    def forward(self, hidden_states):
        # GLU over the channel dim restores `output_hidden_size` channels.
        return nn.functional.glu(self.conv(hidden_states), dim=1)
672
+
673
+
674
class Data2VecAudioPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Data2VecAudioConfig
    base_model_prefix = "data2vec_audio"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # Module-specific cases come first so they are not shadowed by the generic
        # nn.Linear / nn.Conv1d branches below.
        if isinstance(module, Data2VecAudioFeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, Data2VecAudioPositionalConvLayer):
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)

            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            if module.bias is not None:
                module.bias.data.zero_()
            if module.weight is not None:
                module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)

            if module.bias is not None:
                # Uniform bound matching PyTorch's default Conv1d bias init.
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PreTrainedModel._get_feat_extract_output_lengths
    def _get_feat_extract_output_lengths(
        self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None
    ):
        """
        Computes the output length of the convolutional layers
        """

        add_adapter = self.config.add_adapter if add_adapter is None else add_adapter

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        if add_adapter:
            # Each adapter layer further down-samples by `adapter_stride`
            # (kernel size 1 in this length formula).
            for _ in range(self.config.num_adapter_layers):
                input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)

        return input_lengths

    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PreTrainedModel._get_feature_vector_attention_mask
    def _get_feature_vector_attention_mask(
        self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
    ):
        # Effectively attention_mask.sum(-1), but not inplace to be able to run
        # on inference mode.
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]

        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
        output_lengths = output_lengths.to(torch.long)

        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations makes sure that all values before the output lengths idxs are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
754
+
755
+
756
+ DATA2VEC_AUDIO_START_DOCSTRING = r"""
757
+ Data2VecAudio was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and
758
+ Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and
759
+ Michael Auli.
760
+
761
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
762
+ library implements for all its model (such as downloading or saving etc.).
763
+
764
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
765
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
766
+ behavior.
767
+
768
+ Parameters:
769
+ config ([`Data2VecAudioConfig`]): Model configuration class with all the parameters of the model.
770
+ Initializing with a config file does not load the weights associated with the model, only the
771
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
772
+ """
773
+
774
+
775
+ DATA2VEC_AUDIO_INPUTS_DOCSTRING = r"""
776
+ Args:
777
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
778
+ Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
779
+ into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
780
+ soundfile*). To prepare the array into *input_values*, the [`AutoProcessor`] should be used for padding and
781
+ conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.
782
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
783
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
784
+ 1]`:
785
+
786
+ - 1 for tokens that are **not masked**,
787
+ - 0 for tokens that are **masked**.
788
+
789
+ [What are attention masks?](../glossary#attention-mask)
790
+
791
+ <Tip warning={true}>
792
+
793
+ `attention_mask` should be passed if the corresponding processor has `config.return_attention_mask ==
794
+ True`, which is the case for all pre-trained Data2Vec Audio models. Be aware that that even with
795
+ `attention_mask`, zero-padded inputs will have slightly different outputs compared to non-padded inputs
796
+ because there are more than one convolutional layer in the positional encodings. For a more detailed
797
+ explanation, see [here](https://github.com/huggingface/transformers/issues/25621#issuecomment-1713759349).
798
+
799
+ </Tip>
800
+
801
+ output_attentions (`bool`, *optional*):
802
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
803
+ tensors for more detail.
804
+ output_hidden_states (`bool`, *optional*):
805
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
806
+ more detail.
807
+ return_dict (`bool`, *optional*):
808
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
809
+ """
810
+
811
+
812
+ @add_start_docstrings(
813
+ "The bare Data2VecAudio Model transformer outputting raw hidden-states without any specific head on top.",
814
+ DATA2VEC_AUDIO_START_DOCSTRING,
815
+ )
816
+ class Data2VecAudioModel(Data2VecAudioPreTrainedModel):
817
+ def __init__(self, config: Data2VecAudioConfig):
818
+ super().__init__(config)
819
+ self.config = config
820
+ self.feature_extractor = Data2VecAudioFeatureEncoder(config)
821
+ self.feature_projection = Data2VecAudioFeatureProjection(config)
822
+
823
+ # model only needs masking vector if mask prob is > 0.0
824
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
825
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
826
+
827
+ self.encoder = Data2VecAudioEncoder(config)
828
+
829
+ self.adapter = Data2VecAudioAdapter(config) if config.add_adapter else None
830
+
831
+ # Initialize weights and apply final processing
832
+ self.post_init()
833
+
834
+ def freeze_feature_encoder(self):
835
+ """
836
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
837
+ not be updated during training.
838
+ """
839
+ self.feature_extractor._freeze_parameters()
840
+
841
+ def _mask_hidden_states(
842
+ self,
843
+ hidden_states: torch.FloatTensor,
844
+ mask_time_indices: Optional[torch.FloatTensor] = None,
845
+ attention_mask: Optional[torch.LongTensor] = None,
846
+ ):
847
+ """
848
+ Masks extracted features along time axis and/or along feature axis according to
849
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
850
+ """
851
+
852
+ # `config.apply_spec_augment` can set masking to False
853
+ if not getattr(self.config, "apply_spec_augment", True):
854
+ return hidden_states
855
+
856
+ # generate indices & apply SpecAugment along time axis
857
+ batch_size, sequence_length, hidden_size = hidden_states.size()
858
+
859
+ if mask_time_indices is not None:
860
+ # apply SpecAugment along time axis with given mask_time_indices
861
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
862
+ elif self.config.mask_time_prob > 0 and self.training:
863
+ mask_time_indices = _compute_mask_indices(
864
+ (batch_size, sequence_length),
865
+ mask_prob=self.config.mask_time_prob,
866
+ mask_length=self.config.mask_time_length,
867
+ attention_mask=attention_mask,
868
+ min_masks=self.config.mask_time_min_masks,
869
+ )
870
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
871
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
872
+
873
+ if self.config.mask_feature_prob > 0 and self.training:
874
+ # generate indices & apply SpecAugment along feature axis
875
+ mask_feature_indices = _compute_mask_indices(
876
+ (batch_size, hidden_size),
877
+ mask_prob=self.config.mask_feature_prob,
878
+ mask_length=self.config.mask_feature_length,
879
+ min_masks=self.config.mask_feature_min_masks,
880
+ )
881
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
882
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
883
+ hidden_states[mask_feature_indices] = 0
884
+
885
+ return hidden_states
886
+
887
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
888
+ @add_code_sample_docstrings(
889
+ checkpoint=_CHECKPOINT_FOR_DOC,
890
+ output_type=Wav2Vec2BaseModelOutput,
891
+ config_class=_CONFIG_FOR_DOC,
892
+ modality="audio",
893
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
894
+ )
895
+ def forward(
896
+ self,
897
+ input_values: Optional[torch.Tensor],
898
+ attention_mask: Optional[torch.Tensor] = None,
899
+ mask_time_indices: Optional[torch.FloatTensor] = None,
900
+ output_attentions: Optional[bool] = None,
901
+ output_hidden_states: Optional[bool] = None,
902
+ return_dict: Optional[bool] = None,
903
+ ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
904
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
905
+ output_hidden_states = (
906
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
907
+ )
908
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
909
+
910
+ extract_features = self.feature_extractor(input_values)
911
+ extract_features = extract_features.transpose(1, 2)
912
+
913
+ if attention_mask is not None:
914
+ # compute reduced attention_mask corresponding to feature vectors
915
+ attention_mask = self._get_feature_vector_attention_mask(
916
+ extract_features.shape[1], attention_mask, add_adapter=False
917
+ )
918
+
919
+ hidden_states, extract_features = self.feature_projection(extract_features)
920
+ hidden_states = self._mask_hidden_states(
921
+ hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
922
+ )
923
+
924
+ encoder_outputs = self.encoder(
925
+ hidden_states,
926
+ attention_mask=attention_mask,
927
+ output_attentions=output_attentions,
928
+ output_hidden_states=output_hidden_states,
929
+ return_dict=return_dict,
930
+ )
931
+
932
+ hidden_states = encoder_outputs[0]
933
+
934
+ if self.adapter is not None:
935
+ hidden_states = self.adapter(hidden_states)
936
+
937
+ if not return_dict:
938
+ return (hidden_states, extract_features) + encoder_outputs[1:]
939
+
940
+ return Wav2Vec2BaseModelOutput(
941
+ last_hidden_state=hidden_states,
942
+ extract_features=extract_features,
943
+ hidden_states=encoder_outputs.hidden_states,
944
+ attentions=encoder_outputs.attentions,
945
+ )
946
+
947
+
948
+ @add_start_docstrings(
949
+ """Data2VecAudio Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
950
+ DATA2VEC_AUDIO_START_DOCSTRING,
951
+ )
952
+ class Data2VecAudioForCTC(Data2VecAudioPreTrainedModel):
953
+ def __init__(self, config):
954
+ super().__init__(config)
955
+
956
+ self.data2vec_audio = Data2VecAudioModel(config)
957
+ self.dropout = nn.Dropout(config.final_dropout)
958
+
959
+ if config.vocab_size is None:
960
+ raise ValueError(
961
+ f"You are trying to instantiate {self.__class__} with a configuration that "
962
+ "does not define the vocabulary size of the language model head. Please "
963
+ "instantiate the model as follows: `Data2VecAudioForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
964
+ "or define `vocab_size` of your model's configuration."
965
+ )
966
+ output_hidden_size = (
967
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
968
+ )
969
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
970
+
971
+ # Initialize weights and apply final processing
972
+ self.post_init()
973
+
974
+ def freeze_feature_extractor(self):
975
+ """
976
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
977
+ not be updated during training.
978
+ """
979
+ warnings.warn(
980
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
981
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
982
+ FutureWarning,
983
+ )
984
+ self.freeze_feature_encoder()
985
+
986
+ def freeze_feature_encoder(self):
987
+ """
988
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
989
+ not be updated during training.
990
+ """
991
+ self.data2vec_audio.feature_extractor._freeze_parameters()
992
+
993
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
994
+ @add_code_sample_docstrings(
995
+ checkpoint=_CHECKPOINT_FOR_DOC,
996
+ output_type=CausalLMOutput,
997
+ config_class=_CONFIG_FOR_DOC,
998
+ expected_output=_CTC_EXPECTED_OUTPUT,
999
+ expected_loss=_CTC_EXPECTED_LOSS,
1000
+ )
1001
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.forward with wav2vec2->data2vec_audio
1002
+ def forward(
1003
+ self,
1004
+ input_values: Optional[torch.Tensor],
1005
+ attention_mask: Optional[torch.Tensor] = None,
1006
+ output_attentions: Optional[bool] = None,
1007
+ output_hidden_states: Optional[bool] = None,
1008
+ return_dict: Optional[bool] = None,
1009
+ labels: Optional[torch.Tensor] = None,
1010
+ ) -> Union[Tuple, CausalLMOutput]:
1011
+ r"""
1012
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
1013
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
1014
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
1015
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
1016
+ config.vocab_size - 1]`.
1017
+ """
1018
+
1019
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1020
+
1021
+ outputs = self.data2vec_audio(
1022
+ input_values,
1023
+ attention_mask=attention_mask,
1024
+ output_attentions=output_attentions,
1025
+ output_hidden_states=output_hidden_states,
1026
+ return_dict=return_dict,
1027
+ )
1028
+
1029
+ hidden_states = outputs[0]
1030
+ hidden_states = self.dropout(hidden_states)
1031
+
1032
+ logits = self.lm_head(hidden_states)
1033
+
1034
+ loss = None
1035
+ if labels is not None:
1036
+ if labels.max() >= self.config.vocab_size:
1037
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
1038
+
1039
+ # retrieve loss input_lengths from attention_mask
1040
+ attention_mask = (
1041
+ attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
1042
+ )
1043
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
1044
+
1045
+ # assuming that padded tokens are filled with -100
1046
+ # when not being attended to
1047
+ labels_mask = labels >= 0
1048
+ target_lengths = labels_mask.sum(-1)
1049
+ flattened_targets = labels.masked_select(labels_mask)
1050
+
1051
+ # ctc_loss doesn't support fp16
1052
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
1053
+
1054
+ with torch.backends.cudnn.flags(enabled=False):
1055
+ loss = nn.functional.ctc_loss(
1056
+ log_probs,
1057
+ flattened_targets,
1058
+ input_lengths,
1059
+ target_lengths,
1060
+ blank=self.config.pad_token_id,
1061
+ reduction=self.config.ctc_loss_reduction,
1062
+ zero_infinity=self.config.ctc_zero_infinity,
1063
+ )
1064
+
1065
+ if not return_dict:
1066
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1067
+ return ((loss,) + output) if loss is not None else output
1068
+
1069
+ return CausalLMOutput(
1070
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1071
+ )
1072
+
1073
+
1074
+ @add_start_docstrings(
1075
+ """
1076
+ Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks
1077
+ like SUPERB Keyword Spotting.
1078
+ """,
1079
+ DATA2VEC_AUDIO_START_DOCSTRING,
1080
+ )
1081
+ class Data2VecAudioForSequenceClassification(Data2VecAudioPreTrainedModel):
1082
+ def __init__(self, config):
1083
+ super().__init__(config)
1084
+
1085
+ if hasattr(config, "add_adapter") and config.add_adapter:
1086
+ raise ValueError(
1087
+ "Sequence classification does not support the use of Data2VecAudio adapters (config.add_adapter=True)"
1088
+ )
1089
+ self.data2vec_audio = Data2VecAudioModel(config)
1090
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1091
+ if config.use_weighted_layer_sum:
1092
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1093
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
1094
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
1095
+
1096
+ # Initialize weights and apply final processing
1097
+ self.post_init()
1098
+
1099
+ def freeze_feature_extractor(self):
1100
+ """
1101
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
1102
+ not be updated during training.
1103
+ """
1104
+ warnings.warn(
1105
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1106
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1107
+ FutureWarning,
1108
+ )
1109
+ self.freeze_feature_encoder()
1110
+
1111
+ def freeze_feature_encoder(self):
1112
+ """
1113
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
1114
+ not be updated during training.
1115
+ """
1116
+ self.data2vec_audio.feature_extractor._freeze_parameters()
1117
+
1118
+ def freeze_base_model(self):
1119
+ """
1120
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1121
+ be updated during training. Only the classification head will be updated.
1122
+ """
1123
+ for param in self.data2vec_audio.parameters():
1124
+ param.requires_grad = False
1125
+
1126
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
1127
+ @add_code_sample_docstrings(
1128
+ checkpoint=_CHECKPOINT_FOR_DOC,
1129
+ output_type=SequenceClassifierOutput,
1130
+ config_class=_CONFIG_FOR_DOC,
1131
+ modality="audio",
1132
+ )
1133
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with wav2vec2->data2vec_audio
1134
+ def forward(
1135
+ self,
1136
+ input_values: Optional[torch.Tensor],
1137
+ attention_mask: Optional[torch.Tensor] = None,
1138
+ output_attentions: Optional[bool] = None,
1139
+ output_hidden_states: Optional[bool] = None,
1140
+ return_dict: Optional[bool] = None,
1141
+ labels: Optional[torch.Tensor] = None,
1142
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1143
+ r"""
1144
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1145
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1146
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1147
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1148
+ """
1149
+
1150
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1151
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1152
+
1153
+ outputs = self.data2vec_audio(
1154
+ input_values,
1155
+ attention_mask=attention_mask,
1156
+ output_attentions=output_attentions,
1157
+ output_hidden_states=output_hidden_states,
1158
+ return_dict=return_dict,
1159
+ )
1160
+
1161
+ if self.config.use_weighted_layer_sum:
1162
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1163
+ hidden_states = torch.stack(hidden_states, dim=1)
1164
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1165
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1166
+ else:
1167
+ hidden_states = outputs[0]
1168
+
1169
+ hidden_states = self.projector(hidden_states)
1170
+ if attention_mask is None:
1171
+ pooled_output = hidden_states.mean(dim=1)
1172
+ else:
1173
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
1174
+ hidden_states[~padding_mask] = 0.0
1175
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
1176
+
1177
+ logits = self.classifier(pooled_output)
1178
+
1179
+ loss = None
1180
+ if labels is not None:
1181
+ loss_fct = CrossEntropyLoss()
1182
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
1183
+
1184
+ if not return_dict:
1185
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1186
+ return ((loss,) + output) if loss is not None else output
1187
+
1188
+ return SequenceClassifierOutput(
1189
+ loss=loss,
1190
+ logits=logits,
1191
+ hidden_states=outputs.hidden_states,
1192
+ attentions=outputs.attentions,
1193
+ )
1194
+
1195
+
1196
+ @add_start_docstrings(
1197
+ """
1198
+ Data2VecAudio Model with a frame classification head on top for tasks like Speaker Diarization.
1199
+ """,
1200
+ DATA2VEC_AUDIO_START_DOCSTRING,
1201
+ )
1202
+ class Data2VecAudioForAudioFrameClassification(Data2VecAudioPreTrainedModel):
1203
+ def __init__(self, config):
1204
+ super().__init__(config)
1205
+
1206
+ if hasattr(config, "add_adapter") and config.add_adapter:
1207
+ raise ValueError(
1208
+ "Audio frame classification does not support the use of Data2VecAudio adapters"
1209
+ " (config.add_adapter=True)"
1210
+ )
1211
+ self.data2vec_audio = Data2VecAudioModel(config)
1212
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1213
+ if config.use_weighted_layer_sum:
1214
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1215
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1216
+ self.num_labels = config.num_labels
1217
+
1218
+ self.init_weights()
1219
+
1220
+ def freeze_feature_extractor(self):
1221
+ """
1222
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
1223
+ not be updated during training.
1224
+ """
1225
+ warnings.warn(
1226
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1227
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1228
+ FutureWarning,
1229
+ )
1230
+ self.freeze_feature_encoder()
1231
+
1232
+ def freeze_feature_encoder(self):
1233
+ """
1234
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
1235
+ not be updated during training.
1236
+ """
1237
+ self.data2vec_audio.feature_extractor._freeze_parameters()
1238
+
1239
+ def freeze_base_model(self):
1240
+ """
1241
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1242
+ be updated during training. Only the classification head will be updated.
1243
+ """
1244
+ for param in self.data2vec_audio.parameters():
1245
+ param.requires_grad = False
1246
+
1247
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
1248
+ @add_code_sample_docstrings(
1249
+ checkpoint=_CHECKPOINT_FOR_DOC,
1250
+ output_type=TokenClassifierOutput,
1251
+ config_class=_CONFIG_FOR_DOC,
1252
+ modality="audio",
1253
+ )
1254
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification.forward with wav2vec2->data2vec_audio
1255
+ def forward(
1256
+ self,
1257
+ input_values: Optional[torch.Tensor],
1258
+ attention_mask: Optional[torch.Tensor] = None,
1259
+ labels: Optional[torch.Tensor] = None,
1260
+ output_attentions: Optional[bool] = None,
1261
+ output_hidden_states: Optional[bool] = None,
1262
+ return_dict: Optional[bool] = None,
1263
+ ) -> Union[Tuple, TokenClassifierOutput]:
1264
+ r"""
1265
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1266
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1267
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1268
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1269
+ """
1270
+
1271
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1272
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1273
+
1274
+ outputs = self.data2vec_audio(
1275
+ input_values,
1276
+ attention_mask=attention_mask,
1277
+ output_attentions=output_attentions,
1278
+ output_hidden_states=output_hidden_states,
1279
+ return_dict=return_dict,
1280
+ )
1281
+
1282
+ if self.config.use_weighted_layer_sum:
1283
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1284
+ hidden_states = torch.stack(hidden_states, dim=1)
1285
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1286
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1287
+ else:
1288
+ hidden_states = outputs[0]
1289
+
1290
+ logits = self.classifier(hidden_states)
1291
+
1292
+ loss = None
1293
+ if labels is not None:
1294
+ loss_fct = CrossEntropyLoss()
1295
+ loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))
1296
+
1297
+ if not return_dict:
1298
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1299
+ return output
1300
+
1301
+ return TokenClassifierOutput(
1302
+ loss=loss,
1303
+ logits=logits,
1304
+ hidden_states=outputs.hidden_states,
1305
+ attentions=outputs.attentions,
1306
+ )
1307
+
1308
+
1309
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss
1310
+ class AMSoftmaxLoss(nn.Module):
1311
+ def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
1312
+ super(AMSoftmaxLoss, self).__init__()
1313
+ self.scale = scale
1314
+ self.margin = margin
1315
+ self.num_labels = num_labels
1316
+ self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
1317
+ self.loss = nn.CrossEntropyLoss()
1318
+
1319
+ def forward(self, hidden_states, labels):
1320
+ labels = labels.flatten()
1321
+ weight = nn.functional.normalize(self.weight, dim=0)
1322
+ hidden_states = nn.functional.normalize(hidden_states, dim=1)
1323
+ cos_theta = torch.mm(hidden_states, weight)
1324
+ psi = cos_theta - self.margin
1325
+
1326
+ onehot = nn.functional.one_hot(labels, self.num_labels)
1327
+ logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
1328
+ loss = self.loss(logits, labels)
1329
+
1330
+ return loss
1331
+
1332
+
1333
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer
1334
+ class TDNNLayer(nn.Module):
1335
+ def __init__(self, config, layer_id=0):
1336
+ super().__init__()
1337
+ self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
1338
+ self.out_conv_dim = config.tdnn_dim[layer_id]
1339
+ self.kernel_size = config.tdnn_kernel[layer_id]
1340
+ self.dilation = config.tdnn_dilation[layer_id]
1341
+
1342
+ self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
1343
+ self.activation = nn.ReLU()
1344
+
1345
+ def forward(self, hidden_states):
1346
+ hidden_states = hidden_states.unsqueeze(1)
1347
+ hidden_states = nn.functional.unfold(
1348
+ hidden_states,
1349
+ (self.kernel_size, self.in_conv_dim),
1350
+ stride=(1, self.in_conv_dim),
1351
+ dilation=(self.dilation, 1),
1352
+ )
1353
+ hidden_states = hidden_states.transpose(1, 2)
1354
+ hidden_states = self.kernel(hidden_states)
1355
+
1356
+ hidden_states = self.activation(hidden_states)
1357
+ return hidden_states
1358
+
1359
+
1360
+ @add_start_docstrings(
1361
+ """
1362
+ Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification.
1363
+ """,
1364
+ DATA2VEC_AUDIO_START_DOCSTRING,
1365
+ )
1366
+ class Data2VecAudioForXVector(Data2VecAudioPreTrainedModel):
1367
+ def __init__(self, config):
1368
+ super().__init__(config)
1369
+
1370
+ self.data2vec_audio = Data2VecAudioModel(config)
1371
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1372
+ if config.use_weighted_layer_sum:
1373
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1374
+ self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
1375
+
1376
+ tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
1377
+ self.tdnn = nn.ModuleList(tdnn_layers)
1378
+
1379
+ self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
1380
+ self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
1381
+
1382
+ self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
1383
+
1384
+ self.init_weights()
1385
+
1386
+ def freeze_feature_extractor(self):
1387
+ """
1388
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
1389
+ not be updated during training.
1390
+ """
1391
+ warnings.warn(
1392
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
1393
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
1394
+ FutureWarning,
1395
+ )
1396
+ self.freeze_feature_encoder()
1397
+
1398
+ def freeze_feature_encoder(self):
1399
+ """
1400
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
1401
+ not be updated during training.
1402
+ """
1403
+ self.data2vec_audio.feature_extractor._freeze_parameters()
1404
+
1405
+ def freeze_base_model(self):
1406
+ """
1407
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1408
+ be updated during training. Only the classification head will be updated.
1409
+ """
1410
+ for param in self.data2vec_audio.parameters():
1411
+ param.requires_grad = False
1412
+
1413
+ def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
1414
+ """
1415
+ Computes the output length of the TDNN layers
1416
+ """
1417
+
1418
+ def _conv_out_length(input_length, kernel_size, stride):
1419
+ # 1D convolutional layer output length formula taken
1420
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
1421
+ return (input_length - kernel_size) // stride + 1
1422
+
1423
+ for kernel_size in self.config.tdnn_kernel:
1424
+ input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
1425
+
1426
+ return input_lengths
1427
+
1428
+ @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
1429
+ @add_code_sample_docstrings(
1430
+ checkpoint=_CHECKPOINT_FOR_DOC,
1431
+ output_type=XVectorOutput,
1432
+ config_class=_CONFIG_FOR_DOC,
1433
+ modality="audio",
1434
+ )
1435
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector.forward with wav2vec2->data2vec_audio
1436
+ def forward(
1437
+ self,
1438
+ input_values: Optional[torch.Tensor],
1439
+ attention_mask: Optional[torch.Tensor] = None,
1440
+ output_attentions: Optional[bool] = None,
1441
+ output_hidden_states: Optional[bool] = None,
1442
+ return_dict: Optional[bool] = None,
1443
+ labels: Optional[torch.Tensor] = None,
1444
+ ) -> Union[Tuple, XVectorOutput]:
1445
+ r"""
1446
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1447
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1448
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1449
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1450
+ """
1451
+
1452
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1453
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1454
+
1455
+ outputs = self.data2vec_audio(
1456
+ input_values,
1457
+ attention_mask=attention_mask,
1458
+ output_attentions=output_attentions,
1459
+ output_hidden_states=output_hidden_states,
1460
+ return_dict=return_dict,
1461
+ )
1462
+
1463
+ if self.config.use_weighted_layer_sum:
1464
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1465
+ hidden_states = torch.stack(hidden_states, dim=1)
1466
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1467
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1468
+ else:
1469
+ hidden_states = outputs[0]
1470
+
1471
+ hidden_states = self.projector(hidden_states)
1472
+
1473
+ for tdnn_layer in self.tdnn:
1474
+ hidden_states = tdnn_layer(hidden_states)
1475
+
1476
+ # Statistic Pooling
1477
+ if attention_mask is None:
1478
+ mean_features = hidden_states.mean(dim=1)
1479
+ std_features = hidden_states.std(dim=1)
1480
+ else:
1481
+ feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
1482
+ tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
1483
+ mean_features = []
1484
+ std_features = []
1485
+ for i, length in enumerate(tdnn_output_lengths):
1486
+ mean_features.append(hidden_states[i, :length].mean(dim=0))
1487
+ std_features.append(hidden_states[i, :length].std(dim=0))
1488
+ mean_features = torch.stack(mean_features)
1489
+ std_features = torch.stack(std_features)
1490
+ statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
1491
+
1492
+ output_embeddings = self.feature_extractor(statistic_pooling)
1493
+ logits = self.classifier(output_embeddings)
1494
+
1495
+ loss = None
1496
+ if labels is not None:
1497
+ loss = self.objective(logits, labels)
1498
+
1499
+ if not return_dict:
1500
+ output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
1501
+ return ((loss,) + output) if loss is not None else output
1502
+
1503
+ return XVectorOutput(
1504
+ loss=loss,
1505
+ logits=logits,
1506
+ embeddings=output_embeddings,
1507
+ hidden_states=outputs.hidden_states,
1508
+ attentions=outputs.attentions,
1509
+ )
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/modeling_data2vec_text.py ADDED
@@ -0,0 +1,1560 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Data2VecText model."""
16
+
17
+ import math
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN, gelu
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPoolingAndCrossAttentions,
29
+ CausalLMOutputWithCrossAttentions,
30
+ MaskedLMOutput,
31
+ MultipleChoiceModelOutput,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
38
+ from ...utils import (
39
+ add_code_sample_docstrings,
40
+ add_start_docstrings,
41
+ add_start_docstrings_to_model_forward,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from .configuration_data2vec_text import Data2VecTextConfig
46
+
47
+
48
# Module-level logger, named after this module.
logger = logging.get_logger(__name__)


# NOTE(review): this constant is not referenced anywhere in the visible code —
# presumably carried over from the data2vec audio model; confirm before removing.
_HIDDEN_STATES_START_POSITION = 2

# General docstring
_CHECKPOINT_FOR_DOC = "facebook/data2vec-text-base"
_CONFIG_FOR_DOC = "Data2VecTextConfig"


# Known public checkpoints for this architecture.
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/data2vec-text-base",
    # See all data2vec models at https://huggingface.co/models?filter=data2vec-text
]
62
+
63
+
64
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Data2VecText
65
class Data2VecTextForTextEmbeddings(nn.Module):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.

    Sums word, token-type and (for the "absolute" position-embedding type) position
    embeddings, then applies LayerNorm and dropout. Position ids are derived so that
    padding tokens keep the padding position index (RoBERTa-style).
    """

    # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        # All-zero token_type_ids buffer so callers (and traced models) may omit token_type_ids.
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

        # End copy
        # RoBERTa-style tweak: re-create the position embeddings with a padding index so the
        # padding position maps to a dedicated embedding row.
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        """
        Embed `input_ids` (or use precomputed `inputs_embeds`), add token-type and — when
        `position_embedding_type == "absolute"` — position embeddings, then LayerNorm + dropout.
        """
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                # NOTE(review): `create_position_ids_from_input_ids` is defined elsewhere in this module.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        # Start numbering after the padding index so no real token gets the padding position.
        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)
152
+
153
+
154
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->Data2VecText
155
class Data2VecTextSelfAttention(nn.Module):
    """
    Multi-head scaled dot-product self-attention, usable as cross-attention and with
    cached key/value states when the model is configured as a decoder.
    """

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Q/K/V projections over the full (all-heads) width.
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # One of "absolute" (default), "relative_key" or "relative_key_query".
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            # Embeds relative distances in [-(max-1), max-1], shifted to non-negative indices.
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """
        Returns `(context_layer[, attention_probs][, past_key_value])`; the optional entries
        are present when `output_attentions` is set and when `self.is_decoder`, respectively.
        """
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Uni-directional self-attention: append current K/V to the cached ones.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                # With a cache the query is the single newest position.
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in Data2VecTextModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
287
+
288
+
289
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
290
class Data2VecTextSelfOutput(nn.Module):
    """Projects the attention context back to model width, then applies dropout,
    a residual connection and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        # Output projection, regularization, and post-residual normalization.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project and regularize the attention output.
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
302
+
303
+
304
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Data2VecText
305
class Data2VecTextAttention(nn.Module):
    """Complete attention sub-layer: multi-head attention (`self.self`) followed by the
    residual output projection (`self.output`), with support for head pruning."""

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = Data2VecTextSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = Data2VecTextSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from this layer's projections."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Shrink Q/K/V along their output dimension and the output projection along its input dimension.
        for proj_name in ("query", "key", "value"):
            setattr(self.self, proj_name, prune_linear_layer(getattr(self.self, proj_name), index))
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Keep the bookkeeping consistent with the reduced head count.
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        attn_results = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Residual projection of the context; pass through any attention probs / cache entries.
        attention_output = self.output(attn_results[0], hidden_states)
        return (attention_output,) + attn_results[1:]
352
+
353
+
354
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
355
class Data2VecTextIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size followed by the
    configured activation function."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be either the name of a registered activation or a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Expand, then apply the non-linearity.
        return self.intermediate_act_fn(self.dense(hidden_states))
368
+
369
+
370
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
371
class Data2VecTextOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size with dropout,
    a residual connection and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Contract back to model width and regularize.
        contracted = self.dropout(self.dense(hidden_states))
        # Residual connection around the feed-forward block, then normalization.
        return self.LayerNorm(contracted + input_tensor)
383
+
384
+
385
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Data2VecText
386
class Data2VecTextLayer(nn.Module):
    """
    One transformer block: self-attention, optional cross-attention (decoder only),
    and the feed-forward sub-layer applied with optional chunking along the sequence.
    """

    def __init__(self, config):
        super().__init__()
        # Feed-forward chunking parameters (see `apply_chunking_to_forward`).
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Data2VecTextAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            # Cross-attention always uses absolute position embeddings.
            self.crossattention = Data2VecTextAttention(config, position_embedding_type="absolute")
        self.intermediate = Data2VecTextIntermediate(config)
        self.output = Data2VecTextOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """
        Returns `(layer_output, [self_attn_probs], [cross_attn_probs], [present_key_value])`;
        optional entries depend on `output_attentions` and `self.is_decoder`.
        """
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Feed-forward applied in sequence-dimension chunks to bound peak memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # Intermediate expansion + output contraction (with residual) for one chunk.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
470
+
471
+
472
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Data2VecText
473
class Data2VecTextEncoder(nn.Module):
    """
    Stack of `config.num_hidden_layers` transformer layers, with optional gradient
    checkpointing and key/value caching for decoder use.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Data2VecTextLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (e.g. by `gradient_checkpointing_enable`).
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        """
        Run the layer stack; accumulates per-layer hidden states / attentions when
        requested and returns either a tuple or a
        `BaseModelOutputWithPastAndCrossAttentions` depending on `return_dict`.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        # Caching and gradient checkpointing are mutually exclusive.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the hidden states *entering* this layer.
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # Layer returns its present key/value cache as the last element.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            # Also record the final hidden states.
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple form drops any `None` entries.
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
564
+
565
+
566
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
567
class Data2VecTextPooler(nn.Module):
    """Pools a sequence by projecting the first token's hidden state through a
    dense layer with tanh activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # "Pool" the sequence by taking only the first token's hidden state,
        # then project and squash it.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
580
+
581
+
582
class Data2VecTextPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Data2VecTextConfig
    base_model_prefix = "data2vec_text"
    supports_gradient_checkpointing = True
    # Modules that must not be split across devices by automatic device mapping.
    _no_split_modules = ["Data2VecTextForTextEmbeddings", "Data2VecTextLayer"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm may be created without affine parameters, hence the hasattr checks.
            if hasattr(module, "bias") and module.bias is not None:
                module.bias.data.zero_()
            if hasattr(module, "weight") and module.weight is not None:
                module.weight.data.fill_(1.0)
610
+
611
+
612
# Shared class-level docstring injected via `@add_start_docstrings` on the model classes below.
DATA2VECTEXT_START_DOCSTRING = r"""
    Data2VecText was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and
    Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and
    Michael Auli.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`Data2VecTextConfig`]): Model configuration class with all the parameters of the
            model. Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
630
+
631
# Shared forward-method docstring; `{0}` is filled with the input shape via
# `@add_start_docstrings_to_model_forward(...)` on each model's `forward`.
DATA2VECTEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
679
+
680
+
681
@add_start_docstrings(
    "The bare Data2VecText Model for text transformer outputting raw hidden-states without any specific head on top.",
    DATA2VECTEXT_START_DOCSTRING,
)
class Data2VecTextModel(Data2VecTextPreTrainedModel):
    """
    Bare Data2VecText transformer returning raw hidden states (no task head).

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in *Attention is
    all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
    Kaiser and Illia Polosukhin.

    To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.

    .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762

    """

    def __init__(self, config, add_pooling_layer=True):
        """Build embeddings, encoder and (optionally) a pooler; then run the standard weight init."""
        super().__init__(config)
        self.config = config

        self.embeddings = Data2VecTextForTextEmbeddings(config)
        self.encoder = Data2VecTextEncoder(config)

        # The pooler is optional so that heads that only need per-token states can skip it.
        self.pooler = Data2VecTextPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Expose the token-embedding matrix (used e.g. for resizing / weight tying).
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    # Copied from transformers.models.bert.modeling_bert.BertModel.forward
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Caching is only meaningful for a decoder; force it off in encoder mode.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length: number of already-cached positions, read from the cached
        # key tensor's sequence dimension (dim 2).
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            # Default mask attends to every position, including the cached prefix.
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        if token_type_ids is None:
            # Prefer the buffer registered on the embeddings module (all zeros, pre-sized);
            # fall back to an explicit zeros tensor when the buffer is absent.
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
863
+
864
+
865
@add_start_docstrings(
    """Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.""", DATA2VECTEXT_START_DOCSTRING
)
class Data2VecTextForCausalLM(Data2VecTextPreTrainedModel):
    """Data2VecText backbone plus an LM head, used for causal (left-to-right) language modeling."""

    # Decoder weights may be tied to the input embeddings; list them so save/load handles the aliasing.
    _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `Data2VecTextLMHeadModel` as a standalone, add `is_decoder=True.`")

        # No pooler: CLM only needs the per-token hidden states.
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        self.lm_head = Data2VecTextLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
        >>> config = Data2VecTextConfig.from_pretrained("facebook/data2vec-text-base")
        >>> config.is_decoder = True
        >>> model = Data2VecTextForCausalLM.from_pretrained("facebook/data2vec-text-base", config=config)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Caching is pointless (and memory-hungry) during training with labels.
        if labels is not None:
            use_cache = False

        outputs = self.data2vec_text(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()

            # Move labels to the logits device (supports model parallelism).
            labels = labels.to(shifted_prediction_scores.device)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
        """Assemble the per-step generation inputs, trimming `input_ids` to tokens not yet in the cache."""
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            # Cached sequence length, read from the cached key tensor's dim 2.
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}

    def _reorder_cache(self, past_key_values, beam_idx):
        """Reorder every layer's cached states along the batch dim to follow the new beam order."""
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
1025
+
1026
+
1027
@add_start_docstrings("""data2vec Model with a `language modeling` head on top.""", DATA2VECTEXT_START_DOCSTRING)
class Data2VecTextForMaskedLM(Data2VecTextPreTrainedModel):
    """Data2VecText backbone plus an LM head, used for masked language modeling."""

    # Decoder weights may be tied to the input embeddings; list them so save/load handles the aliasing.
    _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        # MLM needs bidirectional attention, so decoder configs are flagged.
        if config.is_decoder:
            logger.warning(
                "If you want to use `Data2VecTextForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        self.lm_head = Data2VecTextLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        mask="<mask>",
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        kwargs (`Dict[str, any]`, optional, defaults to *{}*):
            Used to hide legacy arguments that have been deprecated.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.data2vec_text(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            # Unshifted loss: each position predicts its own (masked) token.
            loss_fct = CrossEntropyLoss()

            # Move labels to the logits device (supports model parallelism).
            labels = labels.to(prediction_scores.device)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1117
+
1118
+
1119
class Data2VecTextLMHead(nn.Module):
    """Masked-language-modeling head: dense -> GELU -> LayerNorm -> projection to the vocabulary."""

    def __init__(self, config):
        super().__init__()
        # Transform applied to the encoder's hidden states before the vocab projection.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Vocabulary projection. The bias lives in a standalone Parameter and is aliased onto
        # the decoder so it survives embedding resizing / weight tying.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def forward(self, features, **kwargs):
        # dense -> GELU -> LayerNorm, then project back to vocabulary size (with bias).
        transformed = self.layer_norm(gelu(self.dense(features)))
        return self.decoder(transformed)

    def _tie_weights(self):
        # Re-link the two bias views if they get disconnected (on TPU or when the bias is resized).
        # Keep the accelerate-compatible direction when the decoder bias is still on the meta device.
        if self.decoder.bias.device.type != "meta":
            self.bias = self.decoder.bias
        else:
            self.decoder.bias = self.bias
1149
+
1150
+
1151
@add_start_docstrings(
    """
    Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DATA2VECTEXT_START_DOCSTRING,
)
class Data2VecTextForSequenceClassification(Data2VecTextPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        # The classification head pools the first token itself, so no model-level pooler is needed.
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        self.classifier = Data2VecTextClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.data2vec_text(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Move labels to the logits device (supports model parallelism).
            labels = labels.to(logits.device)

            # Infer the problem type once (from num_labels and label dtype) and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1246
+
1247
+
1248
@add_start_docstrings(
    """
    Data2VecText Model with a multiple choice classification head on top (a linear layer on top of the pooled output
    and a softmax) e.g. for RocStories/SWAG tasks.
    """,
    DATA2VECTEXT_START_DOCSTRING,
)
class Data2VecTextForMultipleChoice(Data2VecTextPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        # Keep the pooler: the choice score is computed from the pooled output.
        self.data2vec_text = Data2VecTextModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One scalar score per (example, choice) pair.
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(
        DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, choices, seq) -> (batch * choices, seq) so the backbone sees one
        # ordinary batch; the logits are reshaped back to (batch, choices) afterwards.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.data2vec_text(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Back to one row of choice scores per original example.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()

            # Move labels to the logits device (supports model parallelism).
            labels = labels.to(reshaped_logits.device)
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1340
+
1341
+
1342
@add_start_docstrings(
    """
    Data2VecText Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    DATA2VECTEXT_START_DOCSTRING,
)
class Data2VecTextForTokenClassification(Data2VecTextPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Per-token classification only needs the sequence output, not a pooler.
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        # Use the dedicated classifier dropout when set; otherwise fall back to the hidden dropout.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.data2vec_text(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Classify every token independently from its hidden state.
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()

            # Move labels to the logits device (supports model parallelism).
            labels = labels.to(logits.device)
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1423
+
1424
+
1425
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Data2VecText
1426
class Data2VecTextClassificationHead(nn.Module):
    """Head for sentence-level classification tasks.

    Pools the sequence by taking the first token's hidden state (the ``<s>``
    token, equivalent to BERT's ``[CLS]``), then applies
    dropout -> dense -> tanh -> dropout -> output projection.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Fall back to the generic hidden dropout when no classifier-specific
        # dropout is configured.
        if config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        # Sentence representation: hidden state of the first (<s>/[CLS]) token.
        cls_state = features[:, 0, :]
        hidden = self.dense(self.dropout(cls_state))
        hidden = self.dropout(torch.tanh(hidden))
        return self.out_proj(hidden)
1446
+
1447
+
1448
@add_start_docstrings(
    """
    Data2VecText Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DATA2VECTEXT_START_DOCSTRING,
)
class Data2VecTextForQuestionAnswering(Data2VecTextPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Encoder without the pooling head: QA needs per-token hidden states only.
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        # Projects each token's hidden state to its span logits. The forward pass
        # unpacks the projection into exactly (start, end), so num_labels must be 2.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.data2vec_text(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states: (batch_size, seq_len, hidden_size).
        sequence_output = outputs[0]

        # (batch_size, seq_len, 2) -> two (batch_size, seq_len) logit tensors.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (clamping to seq_len makes them hit `ignore_index` below, so they contribute no loss)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two heads so the loss scale matches a single classifier.
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
1545
+
1546
+
1547
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids (`torch.Tensor`):
            Token ids of shape `(batch_size, sequence_length)`; entries equal to `padding_idx` are treated as padding.
        padding_idx (`int`):
            Id of the padding token.
        past_key_values_length (`int`, *optional*, defaults to 0):
            Length of an already-processed (cached) prefix; computed positions are offset by this amount.

    Returns:
        `torch.Tensor`: Position ids with the same shape as `input_ids`. Padding entries keep the value `padding_idx`.
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    # cumsum gives 1-based positions over non-padding tokens; multiplying by the
    # mask zeroes out padding slots before the final `+ padding_idx` shift.
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx
evalkit_internvl/lib/python3.10/site-packages/transformers/models/data2vec/modeling_tf_data2vec_vision.py ADDED
@@ -0,0 +1,1725 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 Data2Vec Vision model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import collections.abc
21
+ import math
22
+ from dataclasses import dataclass
23
+ from typing import List, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFBaseModelOutputWithPooling,
32
+ TFSemanticSegmenterOutput,
33
+ TFSequenceClassifierOutput,
34
+ )
35
+ from ...modeling_tf_utils import (
36
+ TFModelInputType,
37
+ TFPreTrainedModel,
38
+ TFSequenceClassificationLoss,
39
+ get_initializer,
40
+ keras_serializable,
41
+ unpack_inputs,
42
+ )
43
+ from ...tf_utils import shape_list, stable_softmax
44
+ from ...utils import (
45
+ add_code_sample_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from .configuration_data2vec_vision import Data2VecVisionConfig
52
+
53
+
54
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "Data2VecVisionConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/data2vec-vision-base"
# Expected last_hidden_state shape for the doc example.
# (197 presumably = 196 patches + 1 CLS token for the base checkpoint — TODO confirm)
_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/data2vec-vision-base-ft1k"
_IMAGE_CLASS_EXPECTED_OUTPUT = "remote control, remote"

TF_DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/data2vec-vision-base-ft1k",
    # See all Data2VecVision models at https://huggingface.co/models?filter=data2vec-vision
]
71
+
72
+
73
@dataclass
class TFData2VecVisionModelOutputWithPooling(TFBaseModelOutputWithPooling):
    """
    Class for outputs of [`TFData2VecVisionModel`].

    Args:
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
            Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
            *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
            will be returned.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Fields default to None so partially-populated outputs (e.g. when hidden
    # states/attentions are not requested) can be constructed.
    last_hidden_state: tf.Tensor = None
    pooler_output: tf.Tensor = None
    hidden_states: Tuple[tf.Tensor] | None = None
    attentions: Tuple[tf.Tensor] | None = None
102
+
103
+
104
class TFData2VecVisionDropPath(tf.keras.layers.Layer):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    References:
        (1) github.com:rwightman/pytorch-image-models
    """

    def __init__(self, drop_path, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x, training=None):
        # Identity at inference time; stochastic depth only during training.
        if not training:
            return x
        keep_prob = 1 - self.drop_path
        # One Bernoulli(keep_prob) draw per sample, broadcast over the
        # remaining dimensions.
        noise_shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
        binary_mask = tf.floor(keep_prob + tf.random.uniform(noise_shape, 0, 1))
        # Scale kept paths by 1/keep_prob so the expected value is unchanged.
        return (x / keep_prob) * binary_mask
122
+
123
+
124
class TFData2VecVisionEmbeddings(tf.keras.layers.Layer):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.

    """

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config

        self.patch_embeddings = TFData2VecVisionPatchEmbeddings(config, name="patch_embeddings")
        self.num_patches = self.patch_embeddings.num_patches
        # NOTE(review): self.config is assigned twice (also above); redundant but harmless.
        self.config = config

        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)

    def build(self, input_shape=None):
        # Weights are created in build() (not __init__) so they get stable,
        # checkpoint-compatible names under this layer's name scope.
        self.cls_token = self.add_weight(
            shape=(1, 1, self.config.hidden_size),
            initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
            trainable=True,
            name="cls_token",
        )
        if self.config.use_mask_token:
            self.mask_token = self.add_weight(
                shape=(1, 1, self.config.hidden_size),
                initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
                trainable=True,
                name="mask_token",
            )
        else:
            self.mask_token = None

        if self.config.use_absolute_position_embeddings:
            # One learned position per patch plus one for the CLS token.
            self.position_embeddings = self.add_weight(
                shape=(1, self.num_patches + 1, self.config.hidden_size),
                initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
                trainable=True,
                name="position_embeddings",
            )
        else:
            self.position_embeddings = None

        if self.built:
            return
        self.built = True
        if getattr(self, "patch_embeddings", None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build(None)

    def call(self, pixel_values: tf.Tensor, bool_masked_pos: tf.Tensor | None = None) -> tf.Tensor:
        """Embed `pixel_values` into a (batch, num_patches + 1, hidden) sequence,
        optionally replacing masked patches with the mask token."""
        embeddings = self.patch_embeddings(pixel_values)
        batch_size, seq_len, projection_dim = shape_list(embeddings)

        cls_tokens = tf.tile(self.cls_token, (batch_size, 1, 1))

        if bool_masked_pos is not None:
            mask_tokens = tf.broadcast_to(self.mask_token, (batch_size, seq_len, projection_dim))
            # replace the masked visual tokens by mask_tokens
            w = bool_masked_pos[..., None]
            w = tf.cast(w, mask_tokens.dtype)
            # since TF doesn't support eager tensor assignment
            embeddings = embeddings * (1 - w) + mask_tokens * w

        # Prepend the CLS token, then add absolute positions if enabled.
        embeddings = tf.concat([cls_tokens, embeddings], axis=1)
        if self.position_embeddings is not None:
            embeddings = embeddings + self.position_embeddings
        embeddings = self.dropout(embeddings)

        return embeddings
194
+
195
+
196
class TFData2VecVisionPatchEmbeddings(tf.keras.layers.Layer):
    """
    Image to Patch Embedding.

    Splits an image into non-overlapping patches via a strided Conv2D and
    flattens the spatial grid into a token sequence.
    """

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config

        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        # Allow ints or (height, width) pairs for both image and patch size.
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.patch_shape = patch_shape
        self.num_channels = num_channels

        # kernel_size == strides == patch_size: each output position covers
        # exactly one non-overlapping patch.
        self.projection = tf.keras.layers.Conv2D(
            filters=hidden_size,
            kernel_size=patch_size,
            strides=patch_size,
            padding="valid",
            data_format="channels_last",
            kernel_initializer="glorot_uniform",  # following torch.nn.Linear
            bias_initializer="zeros",
            name="projection",
        )

    def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
        """Project NCHW `pixel_values` to a (batch, num_patches, hidden_size) sequence."""
        batch_size, num_channels, height, width = shape_list(pixel_values)
        # Shape validation is only possible eagerly; in graph mode dims may be None.
        if tf.executing_eagerly():
            if num_channels != self.num_channels:
                raise ValueError(
                    "Make sure that the channel dimension of the pixel values match with the one set in the"
                    " configuration."
                )
            if height != self.image_size[0] or width != self.image_size[1]:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model"
                    f" ({self.image_size[0]}*{self.image_size[1]})."
                )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))

        projection = self.projection(pixel_values)

        # Change the 2D spatial dimensions to a single temporal dimension.
        # shape = (batch_size, num_patches, out_channels=embed_dim)
        num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])

        return tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "projection", None) is not None:
            with tf.name_scope(self.projection.name):
                # NHWC input with only the channel dimension known statically.
                self.projection.build([None, None, None, self.num_channels])
264
+
265
class TFData2VecVisionSelfAttention(tf.keras.layers.Layer):
    """Multi-head self-attention with optional (per-layer or shared) relative position bias."""

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
        super().__init__(**kwargs)

        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number "
                f"of attention heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.sqrt_att_head_size = math.sqrt(self.attention_head_size)

        self.query = tf.keras.layers.Dense(
            units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
        )
        # Note: the key projection deliberately has no bias term.
        self.key = tf.keras.layers.Dense(
            units=self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key",
            use_bias=False,
        )
        self.value = tf.keras.layers.Dense(
            units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
        )
        self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)

        # A per-layer relative position bias is created only when a window size
        # is given; otherwise a shared bias may still be passed into call().
        if window_size:
            self.relative_position_bias = TFData2VecVisionRelativePositionBias(
                config, window_size=window_size, name="relative_position_bias"
            )
        else:
            self.relative_position_bias = None
        self.config = config

    def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
        tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))

        # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
        return tf.transpose(tensor, perm=[0, 2, 1, 3])

    def call(
        self,
        hidden_states: tf.Tensor,
        head_mask: tf.Tensor,
        output_attentions: bool,
        relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """Returns (attention_output,) or (attention_output, attention_probs) when
        `output_attentions` is True."""
        batch_size = shape_list(hidden_states)[0]
        mixed_query_layer = self.query(inputs=hidden_states)
        mixed_key_layer = self.key(inputs=hidden_states)
        mixed_value_layer = self.value(inputs=hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        # (batch size, num_heads, seq_len_q, seq_len_k)
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        attention_scores = attention_scores / self.sqrt_att_head_size

        # Add relative position bias if present.
        if self.relative_position_bias is not None:
            # Passing `0.0` to the `relative_position_bias()` layer because otherwise Keras
            # might complain about `Layer.call()` not being invoked properly. In this case this input
            # i.e., 0.0 is not going to be used in any calculations so we're safe.
            attention_scores = attention_scores + self.relative_position_bias(0.0)[None, ...]

        # Add shared relative position bias if provided.
        if relative_position_bias is not None:
            attention_scores = attention_scores + relative_position_bias

        # Normalize the attention scores to probabilities.
        attention_probs = stable_softmax(logits=attention_scores, axis=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(inputs=attention_probs, training=training)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = tf.multiply(attention_probs, head_mask)

        attention_output = tf.matmul(attention_probs, value_layer)
        attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])

        # (batch_size, seq_len_q, all_head_size)
        attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
        outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "query", None) is not None:
            with tf.name_scope(self.query.name):
                self.query.build([None, None, self.config.hidden_size])
        if getattr(self, "key", None) is not None:
            with tf.name_scope(self.key.name):
                self.key.build([None, None, self.config.hidden_size])
        if getattr(self, "value", None) is not None:
            with tf.name_scope(self.value.name):
                self.value.build([None, None, self.config.hidden_size])
        if getattr(self, "relative_position_bias", None) is not None:
            with tf.name_scope(self.relative_position_bias.name):
                self.relative_position_bias.build(None)
377
+
378
+
379
class TFData2VecVisionSelfOutput(tf.keras.layers.Layer):
    """
    The residual connection is defined in TFData2VecVisionLayer instead of here (as is the case with other models), due
    to the layernorm applied before each block.
    """

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, gamma=None, training: bool = False) -> tf.Tensor:
        # Note: `input_tensor` and `gamma` are accepted for interface parity but
        # are not used here; the residual add happens in TFData2VecVisionLayer.
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
407
+
408
+
409
class TFData2VecVisionAttention(tf.keras.layers.Layer):
    """Wraps self-attention and its output projection into one attention block."""

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
        super().__init__(**kwargs)

        self.attention = TFData2VecVisionSelfAttention(config, window_size=window_size, name="attention")
        self.dense_output = TFData2VecVisionSelfOutput(config, name="output")

    def prune_heads(self, heads):
        # Head pruning is not supported for this model.
        raise NotImplementedError

    def call(
        self,
        input_tensor: tf.Tensor,
        head_mask: tf.Tensor,
        output_attentions: bool,
        relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        self_outputs = self.attention(
            hidden_states=input_tensor,
            head_mask=head_mask,
            output_attentions=output_attentions,
            relative_position_bias=relative_position_bias,
            training=training,
        )
        attention_output = self.dense_output(
            hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
        )
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "attention", None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, "dense_output", None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)
451
+
452
+
453
# Copied from transformers.models.vit.modeling_tf_vit.TFViTIntermediate with ViT->Data2VecVision
class TFData2VecVisionIntermediate(tf.keras.layers.Layer):
    """Feed-forward expansion: hidden_size -> intermediate_size with activation."""

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )

        # config.hidden_act may be a string name or an already-resolved callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
481
+
482
+
483
class TFData2VecVisionOutput(tf.keras.layers.Layer):
    """Feed-forward contraction: intermediate_size -> hidden_size with dropout.
    (The residual add is applied by the enclosing TFData2VecVisionLayer.)"""

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)

        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                # Input comes from the intermediate (expanded) projection.
                self.dense.build([None, None, self.config.intermediate_size])
506
+
507
+
508
class TFData2VecVisionLayer(tf.keras.layers.Layer):
    """This corresponds to the Block class in the timm implementation.

    One transformer block: pre-LN self-attention and MLP, each with an optional
    learnable LayerScale multiplier (`lambda_1`/`lambda_2`) and stochastic-depth
    drop path on the residual branches.
    """

    def __init__(
        self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0, **kwargs
    ):
        super().__init__(**kwargs)
        self.config = config

        self.attention = TFData2VecVisionAttention(config, window_size=window_size, name="attention")
        self.intermediate = TFData2VecVisionIntermediate(config, name="intermediate")
        # Attribute is `data2vec_output` (presumably to avoid clashing with
        # `Layer.output`), but the sublayer is still saved under the name "output".
        self.data2vec_output = TFData2VecVisionOutput(config, name="output")

        self.layernorm_before = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="layernorm_before"
        )
        self.layernorm_after = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="layernorm_after"
        )
        # Using `layers.Activation` instead of `tf.identity` to better control `training`
        # behaviour.
        self.drop_path = (
            TFData2VecVisionDropPath(drop_path_rate, name="drop_path")
            if drop_path_rate > 0.0
            else tf.keras.layers.Activation("linear", name="drop_path")
        )
        # LayerScale init value; a non-positive value disables the lambdas.
        self.init_values = config.layer_scale_init_value

    def build(self, input_shape: tf.TensorShape = None):
        # NOTE(review): the lambda weights are created *before* the `self.built`
        # early-return below, so calling `build` twice would add them again —
        # presumably `build` only runs once per layer; confirm if reused.
        if self.init_values > 0:
            self.lambda_1 = self.add_weight(
                shape=(self.config.hidden_size),
                initializer="ones",
                trainable=True,
                name="lambda_1",
            )
            self.lambda_2 = self.add_weight(
                shape=(self.config.hidden_size),
                initializer="ones",
                trainable=True,
                name="lambda_2",
            )
            # Overwrite the "ones" initializer with the configured LayerScale value.
            self.lambda_1.assign(self.init_values * tf.ones((self.config.hidden_size)))
            self.lambda_2.assign(self.init_values * tf.ones((self.config.hidden_size)))
        else:
            self.lambda_1, self.lambda_2 = None, None

        if self.built:
            return
        self.built = True
        # Build each sublayer under its own name scope so checkpoint weight
        # names line up.
        if getattr(self, "attention", None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, "intermediate", None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, "data2vec_output", None) is not None:
            with tf.name_scope(self.data2vec_output.name):
                self.data2vec_output.build(None)
        if getattr(self, "layernorm_before", None) is not None:
            with tf.name_scope(self.layernorm_before.name):
                self.layernorm_before.build([None, None, self.config.hidden_size])
        if getattr(self, "layernorm_after", None) is not None:
            with tf.name_scope(self.layernorm_after.name):
                self.layernorm_after.build([None, None, self.config.hidden_size])
        if getattr(self, "drop_path", None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(
        self,
        hidden_states: tf.Tensor,
        head_mask: tf.Tensor,
        output_attentions: bool,
        relative_position_bias: Optional["TFData2VecVisionRelativePositionBias"] = None,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """Run one pre-LN transformer block.

        Returns a tuple whose first element is the block output; attention
        weights follow when `output_attentions` is set.
        """
        self_attention_outputs = self.attention(
            # in Data2VecVision, layernorm is applied before self-attention
            input_tensor=self.layernorm_before(inputs=hidden_states),
            head_mask=head_mask,
            output_attentions=output_attentions,
            relative_position_bias=relative_position_bias,
            training=training,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # apply lambda_1 (LayerScale) if present
        if self.lambda_1 is not None:
            attention_output = self.lambda_1 * attention_output

        # first residual connection
        hidden_states = self.drop_path(attention_output) + hidden_states

        # in Data2VecVision, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)

        layer_output = self.intermediate(layer_output)
        layer_output = self.data2vec_output(layer_output)

        if self.lambda_2 is not None:
            layer_output = self.lambda_2 * layer_output

        # second residual connection
        layer_output = self.drop_path(layer_output) + hidden_states

        outputs = (layer_output,) + outputs

        return outputs
618
+
619
+
620
# Taken and modified from here:
# https://github.com/leondgarse/keras_cv_attention_models/blob/main/keras_cv_attention_models/beit/beit.py#L28
class TFData2VecVisionRelativePositionBias(tf.keras.layers.Layer):
    """Learnable relative position bias for windowed self-attention.

    Holds one bias table entry per relative (dy, dx) offset plus three extra
    entries for cls-to-token, token-to-cls, and cls-to-cls interactions.
    """

    def __init__(self, config: Data2VecVisionConfig, window_size: tuple, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config

        self.window_size = window_size
        # +3 for cls_token_pos_len
        # window_size can be something like (14, 14)
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3

        # Precompute the (static) lookup indices once; only the table is learnable.
        self.relative_position_index = self.get_position_index()

    def build(self, input_shape):
        self.relative_position_bias_table = self.add_weight(
            shape=(self.num_relative_distance, self.config.num_attention_heads),
            initializer="zeros",
            trainable=True,
            name="relative_position_bias_table",
        )  # [2*Wh-1 * 2*Ww-1, nH]
        # cls to token & token 2 cls & cls to cls

        super().build(input_shape)

    def get_position_index(self):
        """Return the [Wh*Ww + 1, Wh*Ww + 1] index map into the bias table."""
        # get pair-wise relative position index for each token inside the window
        xx, yy = tf.meshgrid(range(self.window_size[0]), range(self.window_size[1]))
        coords = tf.stack([yy, xx], axis=0)  # [2, Wh, Ww]
        coords_flatten = tf.reshape(coords, [2, -1])  # [2, Wh*Ww]

        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # [2, Wh*Ww, Wh*Ww]
        relative_coords = tf.transpose(relative_coords, perm=[1, 2, 0])  # [Wh*Ww, Wh*Ww, 2]

        # Shift offsets to be non-negative, then flatten (dy, dx) into one index.
        xx = (relative_coords[:, :, 0] + self.window_size[0] - 1) * (2 * self.window_size[1] - 1)
        yy = relative_coords[:, :, 1] + self.window_size[1] - 1
        relative_coords = tf.stack([xx, yy], axis=-1)

        relative_position_index = tf.reduce_sum(relative_coords, axis=-1)  # [Wh*Ww, Wh*Ww]

        # Prepend the three dedicated cls-token entries along both axes.
        top = tf.ones((1, relative_position_index.shape[1]), dtype=relative_position_index.dtype) * (
            self.num_relative_distance - 3
        )
        left = tf.ones((relative_position_index.shape[0], 1), dtype=relative_position_index.dtype) * (
            self.num_relative_distance - 2
        )
        corner = tf.ones((1, 1), dtype=relative_position_index.dtype) * (self.num_relative_distance - 1)

        left_corner = tf.concat([corner, left], axis=0)
        relative_position_index = tf.concat([top, relative_position_index], axis=0)
        relative_position_index = tf.concat([left_corner, relative_position_index], axis=1)  # [Wh*Ww + 1, Wh*Ww + 1]
        return relative_position_index

    def call(self, inputs=None) -> tf.Tensor:
        # Gather per-pair biases and move heads to the front: [nH, seq, seq].
        # `inputs` is unused; callers pass a dummy value so Keras sees a call.
        relative_position_bias = tf.gather(self.relative_position_bias_table, self.relative_position_index, axis=0)
        return tf.transpose(relative_position_bias, [2, 0, 1])
676
+
677
+
678
class TFData2VecVisionEncoder(tf.keras.layers.Layer):
    """Stack of `TFData2VecVisionLayer` blocks with optional shared relative
    position bias and a linearly increasing stochastic-depth rate."""

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        if config.use_shared_relative_position_bias:
            # One bias table shared by every layer (BEiT-style).
            self.relative_position_bias = TFData2VecVisionRelativePositionBias(
                config, window_size=window_size, name="relative_position_bias"
            )
        else:
            self.relative_position_bias = None

        # stochastic depth decay rule: rate grows linearly with depth
        dpr = list(tf.linspace(0.0, config.drop_path_rate, config.num_hidden_layers))
        self.layer = [
            TFData2VecVisionLayer(
                config,
                # per-layer bias only when the shared one is not used
                window_size=window_size if config.use_relative_position_bias else None,
                drop_path_rate=dpr[i],
                name=f"layer_._{i}",
            )
            for i in range(config.num_hidden_layers)
        ]

    def call(
        self,
        hidden_states: tf.Tensor,
        head_mask: tf.Tensor | None = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, TFBaseModelOutput]:
        # NOTE(review): `call` takes no `training` argument and does not forward
        # one to the sublayers; callers pass `training=` through Keras'
        # `__call__`, which presumably propagates it implicitly — confirm.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            # Passing `0.0` to the `relative_position_bias()` layer because otherwise Keras
            # might complain about `Layer.call()` not being invoked properly. In this case this input
            # i.e., 0.0 is not going to be used in any calculations so we're safe.
            relative_position_bias = (
                self.relative_position_bias(0.0) if self.relative_position_bias is not None else None
            )
            layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple form: drop the `None` members.
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return TFBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build sublayers under their own name scopes for checkpoint-name parity.
        if getattr(self, "relative_position_bias", None) is not None:
            with tf.name_scope(self.relative_position_bias.name):
                self.relative_position_bias.build(None)
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)
753
+
754
+
755
@keras_serializable
class TFData2VecVisionMainLayer(tf.keras.layers.Layer):
    """Core Data2VecVision stack: patch embeddings -> transformer encoder ->
    (optional) final layernorm and pooler. Shared by the task-specific models."""

    config_class = Data2VecVisionConfig

    def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = True, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.add_pooling_layer = add_pooling_layer

        self.embeddings = TFData2VecVisionEmbeddings(config, name="embeddings")
        self.encoder = TFData2VecVisionEncoder(
            config, window_size=self.embeddings.patch_embeddings.patch_shape, name="encoder"
        )
        # With mean pooling the final layernorm is applied inside the pooler,
        # so the main layer's layernorm degenerates to a plain identity.
        self.layernorm = (
            tf.identity
            if config.use_mean_pooling
            else tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
        )

        self.pooler = TFData2VecVisionPooler(config, name="pooler") if add_pooling_layer else None

    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor | None = None,
        bool_masked_pos: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[tuple, TFData2VecVisionModelOutputWithPooling]:
        """Embed `pixel_values`, run the encoder, and optionally pool.

        Raises:
            ValueError: if `pixel_values` is not provided.
            NotImplementedError: if a `head_mask` is passed (not supported here).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(pixel_values, bool_masked_pos, training=training)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]

        return TFData2VecVisionModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "layernorm", None) is not None:
            # `self.layernorm` may be the bare function `tf.identity`, which has
            # no `name` attribute — only build it when it is a real Keras layer.
            if hasattr(self.layernorm, "name"):
                with tf.name_scope(self.layernorm.name):
                    self.layernorm.build((None, self.config.hidden_size))
        if getattr(self, "pooler", None) is not None:
            with tf.name_scope(self.pooler.name):
                self.pooler.build(None)
862
+
863
+
864
class TFData2VecVisionPooler(tf.keras.layers.Layer):
    """Pools the encoder output into a single vector per image.

    When `config.use_mean_pooling` is set, the patch tokens (everything after
    the [CLS] token) are mean-pooled and layer-normalized; otherwise the final
    hidden state of the [CLS] token is returned unchanged.
    """

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        if config.use_mean_pooling:
            self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
        else:
            self.layernorm = None
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        if self.layernorm is None:
            # [CLS]-token pooling: simply take the first position.
            return hidden_states[:, 0]
        # Mean-pool all patch tokens (position 0 is [CLS]) and normalize.
        mean_patches = tf.reduce_mean(hidden_states[:, 1:, :], axis=1)
        return self.layernorm(mean_patches)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        norm = getattr(self, "layernorm", None)
        if norm is not None:
            if hasattr(norm, "name"):
                with tf.name_scope(norm.name):
                    norm.build((None, self.config.hidden_size))
893
+
894
+
895
class TFData2VecVisionPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Data2VecVisionConfig
    base_model_prefix = "data2vec_vision"
    # Vision model: the forward pass takes images, not token ids.
    main_input_name = "pixel_values"
    # `relative_position_index` is recomputed at construction time (see
    # TFData2VecVisionRelativePositionBias.__init__), so copies found in a
    # checkpoint can be ignored without warning.
    _keys_to_ignore_on_load_unexpected = [r"relative_position_index"]
905
+
906
+
907
# Boilerplate docstring injected into model classes via `@add_start_docstrings`.
DATA2VEC_VISION_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.).

    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Args:
        config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
947
+
948
# Shared `call()` argument documentation, injected via
# `@add_start_docstrings_to_model_forward`.
DATA2VEC_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`BeitImageProcessor.__call__`] for details.

        head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.

        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
            in eager mode, in graph mode the value will always be set to True.

        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""
975
+
976
+
977
@add_start_docstrings(
    "The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.",
    DATA2VEC_VISION_START_DOCSTRING,
)
class TFData2VecVisionModel(TFData2VecVisionPreTrainedModel):
    """Thin head-less wrapper around `TFData2VecVisionMainLayer`."""

    def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = False, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.config = config

        self.data2vec_vision = TFData2VecVisionMainLayer(
            config, add_pooling_layer=add_pooling_layer, name="data2vec_vision"
        )

    def get_input_embeddings(self):
        return self.data2vec_vision.get_input_embeddings()

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFData2VecVisionModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        bool_masked_pos: tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[tuple, TFData2VecVisionModelOutputWithPooling]:
        r"""
        bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        # Pure delegation: all argument defaulting happens in the main layer.
        outputs = self.data2vec_vision(
            pixel_values=pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "data2vec_vision", None) is not None:
            with tf.name_scope(self.data2vec_vision.name):
                self.data2vec_vision.build(None)
1035
+
1036
+
1037
@add_start_docstrings(
    """
    Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of
    the final hidden states of the patch tokens) e.g. for ImageNet.
    """,
    DATA2VEC_VISION_START_DOCSTRING,
)
class TFData2VecVisionForImageClassification(TFData2VecVisionPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels
        # Pooling is required: the classifier consumes the pooled output.
        self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=True, name="data2vec_vision")

        # Classifier head
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="classifier",
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: TFModelInputType | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, tuple]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.data2vec_vision(
            pixel_values=pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        # Pooled output is the second element in tuple mode.
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "data2vec_vision", None) is not None:
            with tf.name_scope(self.data2vec_vision.name):
                self.data2vec_vision.build(None)
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                # NOTE(review): the pooled output is 2-D (batch, hidden) but the
                # classifier is built with a 3-D spec — Dense only reads the last
                # dimension, so the resulting weights are identical; confirm.
                self.classifier.build([None, None, self.config.hidden_size])
1119
+
1120
+
1121
class TFData2VecVisionConvModule(tf.keras.layers.Layer):
    """
    A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
    layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).

    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: str = "valid",
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.conv = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            use_bias=bias,
            dilation_rate=dilation,
            name="conv",
        )
        self.bn = tf.keras.layers.BatchNormalization(name="bn", momentum=0.9, epsilon=1e-5)
        self.activation = tf.nn.relu
        self.in_channels = in_channels
        self.out_channels = out_channels

    def call(self, input: tf.Tensor) -> tf.Tensor:
        # conv -> batch norm -> ReLU, applied as one expression.
        return self.activation(self.bn(self.conv(input)))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        conv = getattr(self, "conv", None)
        if conv is not None:
            with tf.name_scope(conv.name):
                # Conv2D expects NHWC here; only the channel count matters.
                conv.build([None, None, None, self.in_channels])
        bn = getattr(self, "bn", None)
        if bn is not None:
            with tf.name_scope(bn.name):
                bn.build((None, None, None, self.out_channels))
1169
+
1170
+
1171
class TFAdaptiveAvgPool2D(tf.keras.layers.Layer):
    """
    TensorFlow reimplementation of `torch.nn.AdaptiveAvgPool2d`.

    Pools an input of arbitrary spatial size to `output_dims`, matching the
    torch op's window placement. The torch op only ever uses two consecutive
    window sizes per axis, which is what makes the gather-based trick in
    `pseudo_1d_pool` possible.
    """

    def __init__(self, output_dims: Tuple[int, int], input_ordering: str = "NHWC", **kwargs):
        super().__init__(**kwargs)
        self.output_dims = output_dims
        self.input_ordering = input_ordering
        if input_ordering not in ("NCHW", "NHWC"):
            raise ValueError("Unrecognized input_ordering, should be 'NCHW' or 'NHWC'!")
        self.h_axis = input_ordering.index("H")
        self.w_axis = input_ordering.index("W")

    def pseudo_1d_pool(self, inputs: tf.Tensor, h_pooling: bool):
        """Adaptively pool a single spatial axis (height if `h_pooling`, else width)."""
        # Figure out which axis we're pooling on and the matching window shapes.
        # (The previous version recomputed `output_dim` in a second, redundant
        # `if h_pooling` block below; it is now computed exactly once.)
        if h_pooling:
            axis = self.h_axis
            output_dim = self.output_dims[0]
            window = lambda size: (size, 1)  # noqa: E731
        else:
            axis = self.w_axis
            output_dim = self.output_dims[1]
            window = lambda size: (1, size)  # noqa: E731
        input_dim = inputs.shape[axis]

        # Figure out the potential pooling windows
        # This is the key idea - the torch op always uses only two
        # consecutive pooling window sizes, like 3 and 4. Therefore,
        # if we pool with both possible sizes, we simply need to gather
        # the 'correct' pool at each position to reimplement the torch op.
        small_window = math.ceil(input_dim / output_dim)
        big_window = small_window + 1
        small_window_shape = window(small_window)
        big_window_shape = window(big_window)

        # For resizes to 1, or integer resizes, we can take quick shortcuts
        if output_dim == input_dim:
            return inputs
        elif output_dim == 1:
            return tf.reduce_mean(inputs, axis=axis, keepdims=True)
        elif input_dim % output_dim == 0:
            return tf.nn.avg_pool2d(
                inputs,
                ksize=small_window_shape,
                strides=small_window_shape,
                padding="VALID",
                data_format=self.input_ordering,
            )
        # When upscaling by an integer factor we can also take a quick shortcut
        elif output_dim > input_dim and output_dim % input_dim == 0:
            return tf.repeat(inputs, repeats=output_dim // input_dim, axis=axis)

        # For non-integer resizes, we pool with both possible window sizes and concatenate them
        if output_dim < input_dim:
            small_pool = tf.nn.avg_pool2d(
                inputs, ksize=small_window_shape, strides=1, padding="VALID", data_format=self.input_ordering
            )
            big_pool = tf.nn.avg_pool2d(
                inputs, ksize=big_window_shape, strides=1, padding="VALID", data_format=self.input_ordering
            )
            both_pool = tf.concat([small_pool, big_pool], axis=axis)
        else:
            # When we're actually upscaling instead, then we build the pools a bit differently
            small_pool = inputs
            big_pool = tf.nn.avg_pool2d(
                inputs, ksize=big_window_shape, strides=1, padding="VALID", data_format=self.input_ordering
            )
            both_pool = tf.concat([small_pool, big_pool], axis=axis)

        # We compute vectors of the start and end positions for each pooling window
        # Each (start, end) pair here corresponds to a single output position
        window_starts = tf.math.floor((tf.range(output_dim, dtype=tf.float32) * input_dim) / output_dim)
        window_starts = tf.cast(window_starts, tf.int64)
        window_ends = tf.math.ceil((tf.range(1, output_dim + 1, dtype=tf.float32) * input_dim) / output_dim)
        window_ends = tf.cast(window_ends, tf.int64)

        # pool_selector is a boolean array of shape (output_dim,) where 1 indicates that output position
        # has a big receptive field and 0 indicates that that output position has a small receptive field
        pool_selector = tf.cast(window_ends - window_starts - small_window, tf.bool)

        # Since we concatenated the small and big pools, we need to do a bit of
        # pointer arithmetic to get the indices of the big pools
        small_indices = window_starts
        big_indices = window_starts + small_pool.shape[axis]

        # Finally, we use the pool_selector to generate a list of indices, one per output position
        gather_indices = tf.where(pool_selector, big_indices, small_indices)

        # Gathering from those indices yields the final, correct pooling
        return tf.gather(both_pool, gather_indices, axis=axis)

    def call(self, inputs: tf.Tensor):
        """Adaptively average-pool `inputs` down (or up) to `self.output_dims`."""
        if self.input_ordering == "NHWC":
            input_shape = inputs.shape[1:3]
        else:
            input_shape = inputs.shape[2:]

        # We break the task down into each possible case
        # Firstly, if we're resizing down to 1, it's just tf.reduce_mean
        if self.output_dims[0] == self.output_dims[1] == 1:
            if self.input_ordering == "NHWC":
                reduce_dims = [1, 2]
            else:
                reduce_dims = [2, 3]
            return tf.reduce_mean(inputs, axis=reduce_dims, keepdims=True)
        # Secondly, if we're resizing by an integer factor on both dimensions, we can take a quick shortcut
        elif input_shape[0] % self.output_dims[0] == 0 and input_shape[1] % self.output_dims[1] == 0:
            h_resize = int(input_shape[0] // self.output_dims[0])
            w_resize = int(input_shape[1] // self.output_dims[1])
            return tf.nn.avg_pool2d(
                inputs,
                ksize=(h_resize, w_resize),
                strides=(h_resize, w_resize),
                padding="VALID",
                data_format=self.input_ordering,
            )
        else:
            # Finally, if we can't take the shortcut, we do a 1D pool on each axis. pseudo_1d_pool will take a shortcut
            # for dimensions where an integer resize is possible. It can also handle upscaling.
            h_pooled = self.pseudo_1d_pool(inputs, h_pooling=True)
            return self.pseudo_1d_pool(h_pooled, h_pooling=False)
1293
+
1294
+
1295
class TFData2VecVisionPyramidPoolingModule(tf.keras.layers.Layer):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.

    Args:
        pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module.
        channels (int): Channels after modules, before conv_seg.

    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, out_channels: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.pool_scales = pool_scales
        self.in_channels = in_channels
        self.out_channels = out_channels

        # One [adaptive-pool, 1x1 conv] pair per scale. Only the conv modules
        # carry an explicit name (the pool layers have no weights to load).
        self.layer_list = []
        for idx, pool_scale in enumerate(pool_scales):
            pool_scale = pool_scale if isinstance(pool_scale, collections.abc.Iterable) else (pool_scale, pool_scale)
            self.layer_list.append(
                [
                    TFAdaptiveAvgPool2D(output_dims=pool_scale),
                    TFData2VecVisionConvModule(
                        in_channels=in_channels, out_channels=self.out_channels, kernel_size=1, name=f"{idx}.1"
                    ),
                ]
            )

    def call(self, x: tf.Tensor) -> List[tf.Tensor]:
        """Return one upsampled feature map per pooling scale (NHWC)."""
        ppm_outs = []
        inputs = x

        for ppm in self.layer_list:
            for layer_module in ppm:
                ppm_out = layer_module(x)
                x = ppm_out

            # Resize each branch back to the input's spatial size so outputs
            # can later be concatenated channel-wise.
            upsampled_ppm_out = tf.image.resize(ppm_out, size=shape_list(inputs)[1:-1], method="bilinear")
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs

    def build(self, input_shape=None):
        # No `self.built` guard here; sub-builds are themselves guarded.
        for layer in self.layer_list:
            for layer_module in layer:
                with tf.name_scope(layer_module.name):
                    layer_module.build(None)
1343
+
1344
+
1345
class TFData2VecVisionUperHead(tf.keras.layers.Layer):
    """
    Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
    [UPerNet](https://arxiv.org/abs/1807.10221).

    It applies a Pyramid Pooling Module to the top (lowest-resolution) feature map
    and a top-down Feature Pyramid Network over the remaining maps, then fuses all
    levels into per-pixel class logits.

    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

    def __init__(self, config: Data2VecVisionConfig, **kwargs) -> None:
        super().__init__(**kwargs)

        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = [config.hidden_size] * 4  # e.g. [768, 768, 768, 768]
        self.channels = config.hidden_size
        # 1x1 conv producing the final per-pixel class logits.
        self.classifier = tf.keras.layers.Conv2D(config.num_labels, kernel_size=1, name="classifier")

        # PSP Module: applied only to the last (top) feature map.
        self.psp_modules = TFData2VecVisionPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, name="psp_modules"
        )
        # Fuses the top feature map with all PSP branches back down to `channels`.
        self.bottleneck = TFData2VecVisionConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding="same",
            name="bottleneck",
        )
        # FPN Module: 1x1 lateral convs + 3x3 output convs for every level except
        # the top one (the top level comes from psp_forward instead).
        # NOTE: layer names ("lateral_convs.{idx}", "fpn_convs.{idx}") must match
        # the checkpoint layout — do not rename.
        self.lateral_convs = []
        self.fpn_convs = []
        for idx, in_channels in enumerate(self.in_channels[:-1]):  # skip the top layer
            l_conv = TFData2VecVisionConvModule(
                in_channels, out_channels=self.channels, kernel_size=1, name=f"lateral_convs.{idx}"
            )
            fpn_conv = TFData2VecVisionConvModule(
                in_channels=self.channels,
                out_channels=self.channels,
                kernel_size=3,
                padding="same",
                name=f"fpn_convs.{idx}",
            )
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        # Fuses the concatenation of all pyramid levels before classification.
        self.fpn_bottleneck = TFData2VecVisionConvModule(
            in_channels=len(self.in_channels) * self.channels,
            out_channels=self.channels,
            kernel_size=3,
            padding="same",
            name="fpn_bottleneck",
        )

    def psp_forward(self, inputs):
        """Apply the PSP module to the top feature map and fuse via the bottleneck."""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = tf.concat(psp_outs, axis=-1)
        output = self.bottleneck(psp_outs)

        return output

    def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor:
        """Fuse the multi-level features into per-pixel logits (NHWC)."""
        # build laterals: 1x1 convs on every level except the top one
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        # the top level is produced by the PSP branch
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path: upsample each level and add it into the next
        # higher-resolution one
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = shape_list(laterals[i - 1])[1:-1]
            laterals[i - 1] = laterals[i - 1] + tf.image.resize(laterals[i], size=prev_shape, method="bilinear")

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        # resize every level to the highest resolution (level 0) before fusing
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = tf.image.resize(fpn_outs[i], size=shape_list(fpn_outs[0])[1:-1], method="bilinear")
        fpn_outs = tf.concat(fpn_outs, axis=-1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output

    def build(self, input_shape=None):
        # Build each sub-layer under its own name scope so variable names match
        # the checkpoint layout.
        if self.built:
            return
        self.built = True
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, None, self.channels])
        if getattr(self, "psp_modules", None) is not None:
            with tf.name_scope(self.psp_modules.name):
                self.psp_modules.build(None)
        if getattr(self, "bottleneck", None) is not None:
            with tf.name_scope(self.bottleneck.name):
                self.bottleneck.build(None)
        if getattr(self, "fpn_bottleneck", None) is not None:
            with tf.name_scope(self.fpn_bottleneck.name):
                self.fpn_bottleneck.build(None)
        for layer in self.lateral_convs:
            with tf.name_scope(layer.name):
                layer.build(None)
        for layer in self.fpn_convs:
            with tf.name_scope(layer.name):
                layer.build(None)
1453
+
1454
+
1455
class TFData2VecVisionFCNHead(tf.keras.layers.Layer):
    """
    Fully Convolution Networks for Semantic Segmentation. This head is implemented from
    [FCNNet](https://arxiv.org/abs/1411.4038).

    Used as the auxiliary segmentation head: a stack of conv modules applied to one
    selected encoder feature map, followed by a 1x1 classifier.

    Args:
        config (Data2VecVisionConfig): Configuration.
        in_index (int): Index of the encoder feature map this head consumes. Default: 2.
        kernel_size (int): The kernel size for convs in the head. Default: 3.
        dilation (int): The dilation rate for convs in the head. Default: 1.


    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

    def __init__(
        self,
        config: Data2VecVisionConfig,
        in_index: int = 2,
        kernel_size: int = 3,
        dilation: Union[int, Tuple[int, int]] = 1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.in_channels = config.hidden_size
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        convs = []
        # First conv projects from the encoder width down to the head width.
        convs.append(
            TFData2VecVisionConvModule(
                in_channels=self.in_channels,
                out_channels=self.channels,
                kernel_size=kernel_size,
                padding="same",
                dilation=dilation,
                name="convs.0",
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                TFData2VecVisionConvModule(
                    in_channels=self.channels,
                    out_channels=self.channels,
                    kernel_size=kernel_size,
                    padding="same",
                    dilation=dilation,
                    # NOTE(review): naming scheme diverges from "convs.0" above
                    # ("conv_module_{i+2}" rather than "convs.{i+1}") — presumably
                    # kept for checkpoint weight-name compatibility; confirm
                    # against the converted checkpoints before renaming.
                    name=f"conv_module_{i+2}",
                )
            )
        if self.num_convs == 0:
            # With no convs configured the head degenerates to just the classifier.
            self.convs = [tf.identity]
        else:
            self.convs = convs
        if self.concat_input:
            # Fuses the raw input with the conv-stack output before classifying.
            self.conv_cat = TFData2VecVisionConvModule(
                self.in_channels + self.channels,
                out_channels=self.channels,
                kernel_size=kernel_size,
                padding="same",
                name="conv_cat",
            )

        # 1x1 conv producing per-pixel class logits.
        self.classifier = tf.keras.layers.Conv2D(config.num_labels, kernel_size=1, name="classifier")

    def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor:
        """Compute auxiliary logits from the `in_index`-th encoder feature map."""
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = hidden_states
        for layer_module in self.convs:
            output = layer_module(output)
        if self.concat_input:
            output = self.conv_cat(tf.concat([hidden_states, output], axis=-1))
        output = self.classifier(output)
        return output

    def build(self, input_shape=None):
        # NOTE(review): the layers in `self.convs` are not built here — they are
        # built lazily on first call; confirm this is intentional.
        if self.built:
            return
        self.built = True
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, None, self.channels])
        if getattr(self, "conv_cat", None) is not None:
            with tf.name_scope(self.conv_cat.name):
                self.conv_cat.build(None)
1542
+
1543
+
1544
@add_start_docstrings(
    """
    Data2VecVision Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
    """,
    DATA2VEC_VISION_START_DOCSTRING,
)
class TFData2VecVisionForSemanticSegmentation(TFData2VecVisionPreTrainedModel):
    def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=False, name="data2vec_vision")

        # FPNs: turn the single-resolution ViT feature maps into a 4-level pyramid
        # (4x upsample, 2x upsample, identity, 2x downsample).
        # Layer names ("fpn1.0", ...) must match the checkpoint layout.
        self.fpn1 = [
            tf.keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.0"),
            tf.keras.layers.BatchNormalization(name="fpn1.1", momentum=0.9, epsilon=1e-5),
            tf.keras.layers.Activation("gelu"),
            tf.keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.3"),
        ]
        self.fpn2 = [tf.keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn2.0")]

        self.fpn3 = tf.identity
        self.fpn4 = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)

        # Semantic segmentation head(s)
        self.decode_head = TFData2VecVisionUperHead(config, name="decode_head")
        self.auxiliary_head = (
            TFData2VecVisionFCNHead(config, name="auxiliary_head") if config.use_auxiliary_head else None
        )

    def compute_loss(self, logits, auxiliary_logits, labels):
        """Masked sparse cross-entropy between upsampled logits and label maps.

        Pixels equal to `config.semantic_loss_ignore_index` are excluded from the
        mean. When `auxiliary_logits` is not None, its loss is added weighted by
        `config.auxiliary_loss_weight`.
        """
        # upsample logits to the images' original size
        if len(shape_list(labels)) > 3:
            label_interp_shape = shape_list(labels)[1:-1]
        else:
            label_interp_shape = shape_list(labels)[-2:]

        upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear")
        # compute weighted loss
        loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")

        # Copied from https://www.tensorflow.org/text/tutorials/transformer#loss_and_metrics.
        # Utility to mask the index to ignore during computing the loss.
        def masked_loss(real, pred):
            mask = tf.math.logical_not(tf.math.equal(real, self.config.semantic_loss_ignore_index))
            loss_ = loss_fct(real, pred)
            mask = tf.cast(mask, dtype=loss_.dtype)
            loss_ *= mask
            reduced_masked_loss = tf.reduce_sum(loss_) / tf.reduce_sum(mask)
            return tf.reshape(reduced_masked_loss, (1,))

        loss = masked_loss(labels, upsampled_logits)
        # Fix: only add the auxiliary term when auxiliary logits exist. Previously
        # `upsampled_auxiliary_logits` was referenced unconditionally, raising a
        # NameError whenever `auxiliary_logits` is None (use_auxiliary_head=False).
        if auxiliary_logits is not None:
            upsampled_auxiliary_logits = tf.image.resize(auxiliary_logits, size=label_interp_shape, method="bilinear")
            loss = loss + self.config.auxiliary_loss_weight * masked_loss(labels, upsampled_auxiliary_logits)

        return loss

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        labels: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, TFSemanticSegmenterOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, TFData2VecVisionForSemanticSegmentation
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base")
        >>> model = TFData2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")

        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> # logits are of shape (batch_size, num_labels, height, width)
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs = self.data2vec_vision(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=True,  # we need the intermediate hidden states
            return_dict=return_dict,
        )
        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]

        # only keep certain features, and reshape
        # note that we do +1 as the encoder_hidden_states also includes the initial embeddings
        features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
        patch_resolution = self.config.image_size // self.config.patch_size

        def reshape_features(x):
            # We do it this way so TF can always infer the non-batch dims at compile time
            x = tf.reshape(x, (-1, patch_resolution, patch_resolution, self.config.hidden_size))
            return x

        # drop the CLS token (`x[:, 1:, :]`) before reshaping to (batch, h, w, hidden)
        features = [reshape_features(x[:, 1:, :]) for x in features]

        # apply FPNs to produce the 4-level pyramid
        ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
        for module in ops[0]:
            features[0] = module(features[0])
        features[1] = ops[1][0](features[1])
        for i in range(len(features[2:])):
            features[i + 2] = ops[i + 2](features[i + 2])

        logits = self.decode_head(features)
        # Transpose the logits (NHWC -> NCHW) to maintain consistency with the
        # PyTorch output format.
        transposed_logits = tf.transpose(logits, perm=[0, 3, 1, 2])

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                loss = self.compute_loss(logits, auxiliary_logits, labels)

        if not return_dict:
            # NOTE(review): the tuple path returns the NHWC `logits` while the
            # dict path returns NCHW `transposed_logits`; kept as-is for backward
            # compatibility — confirm whether callers rely on this asymmetry.
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSemanticSegmenterOutput(
            loss=loss,
            logits=transposed_logits,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        # Build sub-layers under their own name scopes so variable names match
        # the checkpoint layout. fpn3 (identity) and fpn4 (max-pool) hold no weights.
        if self.built:
            return
        self.built = True
        if getattr(self, "data2vec_vision", None) is not None:
            with tf.name_scope(self.data2vec_vision.name):
                self.data2vec_vision.build(None)
        if getattr(self, "decode_head", None) is not None:
            with tf.name_scope(self.decode_head.name):
                self.decode_head.build(None)
        if getattr(self, "auxiliary_head", None) is not None:
            with tf.name_scope(self.auxiliary_head.name):
                self.auxiliary_head.build(None)
        if getattr(self, "fpn1", None) is not None:
            with tf.name_scope(self.fpn1[0].name):
                self.fpn1[0].build([None, None, None, self.config.hidden_size])
            with tf.name_scope(self.fpn1[1].name):
                self.fpn1[1].build((None, None, None, self.config.hidden_size))
            with tf.name_scope(self.fpn1[3].name):
                self.fpn1[3].build([None, None, None, self.config.hidden_size])
        if getattr(self, "fpn2", None) is not None:
            with tf.name_scope(self.fpn2[0].name):
                self.fpn2[0].build([None, None, None, self.config.hidden_size])
evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/__init__.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy-import entry point for the Pix2Struct model family.

Submodules are only imported when their attributes are first accessed (via
`_LazyModule`), so `import transformers` stays fast and optional backends
(vision, torch) are not required unless the corresponding objects are used.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Names always importable, regardless of installed backends.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

# The image processor requires the vision backend (PIL etc.).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]


# The modeling classes require PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

# Under TYPE_CHECKING, import eagerly so static analyzers see the real symbols;
# the structure below mirrors `_import_structure` exactly.
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/configuration_pix2struct.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/configuration_pix2struct.py ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Pix2Struct model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
# Map of canonical checkpoint ids to their hosted config files, used when
# resolving configs for this architecture.
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
31
+
32
+
33
class Pix2StructTextConfig(PretrainedConfig):
    r"""
    Configuration class for [`Pix2StructTextModel`]. Instantiating it with the defaults yields a configuration
    similar to the Pix2Struct text decoder of the
    [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50244):
            Vocabulary size of the `Pix2Struct` text model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`Pix2StructTextModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Dimensionality of the key, query, value projections in each attention head.
        d_ff (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        dense_act_fn (`Union[Callable, str]`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string).
        decoder_start_token_id (`int`, *optional*, defaults to 0):
            The id of the `decoder_start_token_id` token.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the `padding` token.
        eos_token_id (`int`, *optional*, defaults to 1):
            The id of the `end-of-sequence` token.

    Example:

    ```python
    >>> from transformers import Pix2StructTextConfig, Pix2StructTextModel

    >>> # Initializing a Pix2StructTextConfig with google/pix2struct-base style configuration
    >>> configuration = Pix2StructTextConfig()

    >>> # Initializing a Pix2StructTextModel (with random weights) from the google/pix2struct-base style configuration
    >>> model = Pix2StructTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        # Architecture hyper-parameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        # Relative position bias settings.
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        # Regularization / initialization / caching.
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        # Special token ids (also forwarded to the base class below).
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    # NOTE: the parameter name below preserves an existing typo
    # ("pretrainehidden_size_name_or_path"); renaming it would break callers
    # that pass it as a keyword argument.
    @classmethod
    def from_pretrained(
        cls, pretrainehidden_size_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping a composite Pix2StructConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrainehidden_size_name_or_path, **kwargs)

        # When loading from a composite Pix2StructConfig, use its text sub-config.
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        type_mismatch = (
            "model_type" in config_dict
            and hasattr(cls, "model_type")
            and config_dict["model_type"] != cls.model_type
        )
        if type_mismatch:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
171
+
172
+
173
+ class Pix2StructVisionConfig(PretrainedConfig):
174
+ r"""
175
+ This is the configuration class to store the configuration of a [`Pix2StructVisionModel`]. It is used to
176
+ instantiate a Pix2Struct vision model according to the specified arguments, defining the model architecture.
177
+ Instantiating a configuration defaults will yield a similar configuration to that of the Pix2Struct-base
178
+ [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.
179
+
180
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
181
+ documentation from [`PretrainedConfig`] for more information.
182
+
183
+ Args:
184
+ hidden_size (`int`, *optional*, defaults to 768):
185
+ Dimensionality of the encoder layers and the pooler layer.
186
+ patch_embed_hidden_size (`int`, *optional*, defaults to 768):
187
+ Dimensionality of the input patch_embedding layer in the Transformer encoder.
188
+ d_ff (`int`, *optional*, defaults to 2048):
189
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
190
+ d_kv (`int`, *optional*, defaults to 64):
191
+ Dimensionality of the key, query, value projections per attention head.
192
+ num_hidden_layers (`int`, *optional*, defaults to 12):
193
+ Number of hidden layers in the Transformer encoder.
194
+ num_attention_heads (`int`, *optional*, defaults to 12):
195
+ Number of attention heads for each attention layer in the Transformer encoder.
196
+ dense_act_fn (`str` or `function`, *optional*, defaults to `"gelu_new"`):
197
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
198
+ `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
199
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
200
+ The epsilon used by the layer normalization layers.
201
+ dropout_rate (`float`, *optional*, defaults to 0.0):
202
+ The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.
203
+ attention_dropout (`float`, *optional*, defaults to 0.0):
204
+ The dropout ratio for the attention probabilities.
205
+ initializer_range (`float`, *optional*, defaults to 1e-10):
206
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
207
+ initializer_factor (`float`, *optional*, defaults to 1.0):
208
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
209
+ testing).
210
+ seq_len (`int`, *optional*, defaults to 4096):
211
+ Maximum sequence length (here number of patches) supported by the model.
212
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
213
+ The number of buckets to use for each attention layer.
214
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
215
+ The maximum distance (in tokens) to use for each attention layer.
216
+
217
+ Example:
218
+
219
+ ```python
220
+ >>> from transformers import Pix2StructVisionConfig, Pix2StructVisionModel
221
+
222
+ >>> # Initializing a Pix2StructVisionConfig with google/pix2struct-base style configuration
223
+ >>> configuration = Pix2StructVisionConfig()
224
+
225
+ >>> # Initializing a Pix2StructVisionModel (with random weights) from the google/pix2struct-base style configuration
226
+ >>> model = Pix2StructVisionModel(configuration)
227
+
228
+ >>> # Accessing the model configuration
229
+ >>> configuration = model.config
230
+ ```"""
231
+
232
+ model_type = "pix2struct_vision_model"
233
+
234
def __init__(
    self,
    hidden_size=768,
    patch_embed_hidden_size=768,
    d_ff=2048,
    d_kv=64,
    num_hidden_layers=12,
    num_attention_heads=12,
    dense_act_fn="gelu_new",
    layer_norm_eps=1e-6,
    dropout_rate=0.0,
    attention_dropout=0.0,
    initializer_range=1e-10,
    initializer_factor=1.0,
    seq_len=4096,
    relative_attention_num_buckets=32,
    relative_attention_max_distance=128,
    **kwargs,
):
    # Forward any generic configuration options (e.g. `return_dict`) to the
    # `PretrainedConfig` base class; everything Pix2Struct-specific is stored below.
    super().__init__(**kwargs)

    # Model dimensions and depth.
    self.hidden_size = hidden_size
    self.patch_embed_hidden_size = patch_embed_hidden_size
    self.d_ff = d_ff
    self.dropout_rate = dropout_rate
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    # Weight-initialization hyper-parameters.
    self.initializer_range = initializer_range
    self.initializer_factor = initializer_factor
    # Regularization, normalization and activation.
    self.attention_dropout = attention_dropout
    self.layer_norm_eps = layer_norm_eps
    self.dense_act_fn = dense_act_fn
    # Sequence length (number of patches) and T5-style relative-attention buckets.
    self.seq_len = seq_len
    self.relative_attention_num_buckets = relative_attention_num_buckets
    self.relative_attention_max_distance = relative_attention_max_distance
    # Size of the key/value projections per attention head.
    self.d_kv = d_kv
270
+
271
@classmethod
def from_pretrained(
    cls, pretrainehidden_size_name_or_path: Union[str, os.PathLike], **kwargs
) -> "PretrainedConfig":
    """
    Instantiate the vision sub-configuration from a pretrained checkpoint name or path.

    If the checkpoint holds a composite `Pix2StructConfig`, only its `vision_config`
    section is used.

    NOTE(review): the parameter name `pretrainehidden_size_name_or_path` looks like a
    search-and-replace casualty of `pretrained_model_name_or_path` — renaming it would
    break keyword callers, so it is only flagged here.
    """
    cls._set_token_in_kwargs(kwargs)

    config_dict, kwargs = cls.get_config_dict(pretrainehidden_size_name_or_path, **kwargs)

    # get the vision config dict if we are loading from Pix2StructConfig
    if config_dict.get("model_type") == "pix2struct":
        config_dict = config_dict["vision_config"]

    # Warn (but do not fail) when loading a config of a different model type.
    if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
        logger.warning(
            f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
            f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
        )

    return cls.from_dict(config_dict, **kwargs)
290
+
291
+
292
class Pix2StructConfig(PretrainedConfig):
    r"""
    [`Pix2StructConfig`] is the configuration class to store the configuration of a
    [`Pix2StructForConditionalGeneration`]. It is used to instantiate a Pix2Struct model according to the specified
    arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will
    yield a similar configuration to that of the Pix2Struct-base
    [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Pix2StructTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Pix2StructVisionConfig`].
        initializer_factor (`float`, *optional*, defaults to 1.0):
            Factor to multiply the initialization range with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        is_vqa (`bool`, *optional*, defaults to `False`):
            Whether the model has been fine-tuned for VQA or not.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import Pix2StructConfig, Pix2StructForConditionalGeneration

    >>> # Initializing a Pix2StructConfig with google/pix2struct-base style configuration
    >>> configuration = Pix2StructConfig()

    >>> # Initializing a Pix2StructForConditionalGeneration (with random weights) from the google/pix2struct-base style configuration
    >>> model = Pix2StructForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Pix2StructConfig from a Pix2StructTextConfig and a Pix2StructVisionConfig

    >>> # Initializing a Pix2Struct text and Pix2Struct vision configuration
    >>> config_text = Pix2StructTextConfig()
    >>> config_vision = Pix2StructVisionConfig()

    >>> config = Pix2StructConfig.from_text_vision_configs(config_text, config_vision)
    ```"""

    model_type = "pix2struct"

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        # Fall back to default sub-configs when none are supplied.
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        # Mirror the decoder's special-token ids at the composite-config level so
        # generation utilities can read them without digging into the text config.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Propagate the top-level initializer range into both sub-configs so the
        # whole model is initialized consistently.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        r"""
        Instantiate a [`Pix2StructConfig`] (or a derived class) from pix2struct text model configuration and pix2struct
        vision model configuration.

        Returns:
            [`Pix2StructConfig`]: An instance of a configuration object
        """

        # Serialize the sub-configs to dicts; `__init__` re-hydrates them.
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import argparse
16
+ import os
17
+ import re
18
+
19
+ import torch
20
+ from flax.traverse_util import flatten_dict
21
+ from t5x import checkpoints
22
+
23
+ from transformers import (
24
+ AutoTokenizer,
25
+ Pix2StructConfig,
26
+ Pix2StructForConditionalGeneration,
27
+ Pix2StructImageProcessor,
28
+ Pix2StructProcessor,
29
+ Pix2StructTextConfig,
30
+ Pix2StructVisionConfig,
31
+ )
32
+
33
+
34
def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat `{tuple-key: array}` dict."""
    raw_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    return flatten_dict(raw_params)
38
+
39
+
40
def rename_and_convert_flax_params(flax_dict):
    """
    Map flattened T5X/Flax parameter names to the HF Pix2Struct state-dict naming scheme
    and convert the arrays to `torch.Tensor`s.

    Args:
        flax_dict: flat dict produced by `get_flax_param`, keyed by tuples of path
            components (e.g. `("target", "encoder", ...)`).

    Returns:
        dict mapping HF parameter names to `torch.Tensor`s suitable for `load_state_dict`.
    """
    converted_dict = {}

    # Generic renames applied to every parameter path. NOTE: substring replacement is
    # applied in dict-insertion order, so the order of entries here is load-bearing
    # (e.g. ".out" -> ".output" must run before decoder-specific renames below).
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    # Additional renames that only apply to decoder parameters.
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        # Only model parameters live under the "target" prefix; optimizer state is skipped.
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            # Encoder layers are additionally nested one level deeper ("encoder.encoder").
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format; dense kernels are transposed
    # (Flax stores (in, out); torch `nn.Linear` expects (out, in)) while embedding
    # tables keep their layout.
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
103
+
104
+
105
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """
    Convert an original T5X Pix2Struct checkpoint into a HF `Pix2StructForConditionalGeneration`
    model + processor, and save both under `pytorch_dump_folder_path`.

    Args:
        t5x_checkpoint_path: path to the original T5X checkpoint directory.
        pytorch_dump_folder_path: output directory (created if missing).
        use_large: if `True`, use the "large" model dimensions (and 4096 max patches).
        is_vqa: if `True`, mark the converted config as VQA-fine-tuned.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)

    # Base vs. large hyper-parameters (hidden size, FFN width, heads, layers).
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    # Strict load: any unmapped/missing parameter name fails loudly here.
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096

    # NOTE(review): the image processor's `is_vqa` is forced to True regardless of the
    # `is_vqa` argument (which only reaches the config) — confirm this is intended.
    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
143
+
144
+
145
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    # Fixed copy-pasted help text ("Use large model.") for this flag.
    parser.add_argument("--is_vqa", action="store_true", help="Convert a checkpoint fine-tuned for VQA.")
    args = parser.parse_args()

    # Forward both flags. Previously `args.is_vqa` was parsed but never passed, so the
    # converted config always ended up with `is_vqa=False`.
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/image_processing_pix2struct.py ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Pix2Struct."""
16
+ import io
17
+ import math
18
+ from typing import Dict, Optional, Union
19
+
20
+ import numpy as np
21
+ from huggingface_hub import hf_hub_download
22
+
23
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
24
+ from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
25
+ from ...image_utils import (
26
+ ChannelDimension,
27
+ ImageInput,
28
+ get_image_size,
29
+ infer_channel_dimension_format,
30
+ make_list_of_images,
31
+ to_numpy_array,
32
+ valid_images,
33
+ )
34
+ from ...utils import TensorType, is_torch_available, is_vision_available, logging
35
+ from ...utils.import_utils import requires_backends
36
+
37
+
38
+ if is_vision_available():
39
+ import textwrap
40
+
41
+ from PIL import Image, ImageDraw, ImageFont
42
+
43
+ if is_torch_available():
44
+ import torch
45
+
46
+ logger = logging.get_logger(__name__)
47
+ DEFAULT_FONT_PATH = "ybelkada/fonts"
48
+
49
+
50
+ # adapted from: https://discuss.pytorch.org/t/tf-image-extract-patches-in-pytorch/171409/2
51
+ def torch_extract_patches(image_tensor, patch_height, patch_width):
52
+ """
53
+ Utiliy function to extract patches from a given image tensor. Returns a tensor of shape (1, `patch_height`,
54
+ `patch_width`, `num_channels`x `patch_height` x `patch_width`)
55
+
56
+ Args:
57
+ image_tensor (torch.Tensor):
58
+ The image tensor to extract patches from.
59
+ patch_height (int):
60
+ The height of the patches to extract.
61
+ patch_width (int):
62
+ The width of the patches to extract.
63
+ """
64
+ requires_backends(torch_extract_patches, ["torch"])
65
+
66
+ image_tensor = image_tensor.unsqueeze(0)
67
+ patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
68
+ patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
69
+ patches = patches.permute(0, 4, 2, 3, 1).reshape(
70
+ image_tensor.size(2) // patch_height,
71
+ image_tensor.size(3) // patch_width,
72
+ image_tensor.size(1) * patch_height * patch_width,
73
+ )
74
+ return patches.unsqueeze(0)
75
+
76
+
77
+ # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L106
78
+ def render_text(
79
+ text: str,
80
+ text_size: int = 36,
81
+ text_color: str = "black",
82
+ background_color: str = "white",
83
+ left_padding: int = 5,
84
+ right_padding: int = 5,
85
+ top_padding: int = 5,
86
+ bottom_padding: int = 5,
87
+ font_bytes: Optional[bytes] = None,
88
+ font_path: Optional[str] = None,
89
+ ) -> Image.Image:
90
+ """
91
+ Render text. This script is entirely adapted from the original script that can be found here:
92
+ https://github.com/google-research/pix2struct/blob/main/pix2struct/preprocessing/preprocessing_utils.py
93
+
94
+ Args:
95
+ text (`str`, *optional*, defaults to ):
96
+ Text to render.
97
+ text_size (`int`, *optional*, defaults to 36):
98
+ Size of the text.
99
+ text_color (`str`, *optional*, defaults to `"black"`):
100
+ Color of the text.
101
+ background_color (`str`, *optional*, defaults to `"white"`):
102
+ Color of the background.
103
+ left_padding (`int`, *optional*, defaults to 5):
104
+ Padding on the left.
105
+ right_padding (`int`, *optional*, defaults to 5):
106
+ Padding on the right.
107
+ top_padding (`int`, *optional*, defaults to 5):
108
+ Padding on the top.
109
+ bottom_padding (`int`, *optional*, defaults to 5):
110
+ Padding on the bottom.
111
+ font_bytes (`bytes`, *optional*):
112
+ Bytes of the font to use. If `None`, the default font will be used.
113
+ font_path (`str`, *optional*):
114
+ Path to the font to use. If `None`, the default font will be used.
115
+ """
116
+ requires_backends(render_text, "vision")
117
+ # Add new lines so that each line is no more than 80 characters.
118
+
119
+ wrapper = textwrap.TextWrapper(width=80)
120
+ lines = wrapper.wrap(text=text)
121
+ wrapped_text = "\n".join(lines)
122
+
123
+ if font_bytes is not None and font_path is None:
124
+ font = io.BytesIO(font_bytes)
125
+ elif font_path is not None:
126
+ font = font_path
127
+ else:
128
+ font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
129
+ font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
130
+
131
+ # Use a temporary canvas to determine the width and height in pixels when
132
+ # rendering the text.
133
+ temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
134
+ _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
135
+
136
+ # Create the actual image with a bit of padding around the text.
137
+ image_width = text_width + left_padding + right_padding
138
+ image_height = text_height + top_padding + bottom_padding
139
+ image = Image.new("RGB", (image_width, image_height), background_color)
140
+ draw = ImageDraw.Draw(image)
141
+ draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
142
+ return image
143
+
144
+
145
+ # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L87
146
+ def render_header(
147
+ image: np.ndarray, header: str, input_data_format: Optional[Union[str, ChildProcessError]] = None, **kwargs
148
+ ):
149
+ """
150
+ Renders the input text as a header on the input image.
151
+
152
+ Args:
153
+ image (`np.ndarray`):
154
+ The image to render the header on.
155
+ header (`str`):
156
+ The header text.
157
+ data_format (`Union[ChannelDimension, str]`, *optional*):
158
+ The data format of the image. Can be either "ChannelDimension.channels_first" or
159
+ "ChannelDimension.channels_last".
160
+
161
+ Returns:
162
+ `np.ndarray`: The image with the header rendered.
163
+ """
164
+ requires_backends(render_header, "vision")
165
+
166
+ # Convert to PIL image if necessary
167
+ image = to_pil_image(image, input_data_format=input_data_format)
168
+
169
+ header_image = render_text(header, **kwargs)
170
+ new_width = max(header_image.width, image.width)
171
+
172
+ new_height = int(image.height * (new_width / image.width))
173
+ new_header_height = int(header_image.height * (new_width / header_image.width))
174
+
175
+ new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
176
+ new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
177
+ new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
178
+
179
+ # Convert back to the original framework if necessary
180
+ new_image = to_numpy_array(new_image)
181
+
182
+ if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
183
+ new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
184
+
185
+ return new_image
186
+
187
+
188
+ class Pix2StructImageProcessor(BaseImageProcessor):
189
+ r"""
190
+ Constructs a Pix2Struct image processor.
191
+
192
+ Args:
193
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
194
+ Whether to convert the image to RGB.
195
+ do_normalize (`bool`, *optional*, defaults to `True`):
196
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
197
+ method. According to Pix2Struct paper and code, the image is normalized with its own mean and standard
198
+ deviation.
199
+ patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
200
+ The patch size to use for the image. According to Pix2Struct paper and code, the patch size is 16x16.
201
+ max_patches (`int`, *optional*, defaults to 2048):
202
+ The maximum number of patches to extract from the image as per the [Pix2Struct
203
+ paper](https://arxiv.org/pdf/2210.03347.pdf).
204
+ is_vqa (`bool`, *optional*, defaults to `False`):
205
+ Whether or not the image processor is for the VQA task. If `True` and `header_text` is passed in, text is
206
+ rendered onto the input images.
207
+ """
208
+
209
+ model_input_names = ["flattened_patches"]
210
+
211
+ def __init__(
212
+ self,
213
+ do_convert_rgb: bool = True,
214
+ do_normalize: bool = True,
215
+ patch_size: Dict[str, int] = None,
216
+ max_patches: int = 2048,
217
+ is_vqa: bool = False,
218
+ **kwargs,
219
+ ) -> None:
220
+ super().__init__(**kwargs)
221
+ self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
222
+ self.do_normalize = do_normalize
223
+ self.do_convert_rgb = do_convert_rgb
224
+ self.max_patches = max_patches
225
+ self.is_vqa = is_vqa
226
+
227
+ def extract_flattened_patches(
228
+ self,
229
+ image: np.ndarray,
230
+ max_patches: int,
231
+ patch_size: dict,
232
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
233
+ **kwargs,
234
+ ) -> np.ndarray:
235
+ """
236
+ Extract flattened patches from an image.
237
+
238
+ Args:
239
+ image (`np.ndarray`):
240
+ Image to extract flattened patches from.
241
+ max_patches (`int`):
242
+ Maximum number of patches to extract.
243
+ patch_size (`dict`):
244
+ Dictionary containing the patch height and width.
245
+
246
+ Returns:
247
+ result (`np.ndarray`):
248
+ A sequence of `max_patches` flattened patches.
249
+ """
250
+ requires_backends(self.extract_flattened_patches, "torch")
251
+
252
+ # convert to torch
253
+ image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)
254
+ image = torch.from_numpy(image)
255
+
256
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
257
+ image_height, image_width = get_image_size(image, ChannelDimension.FIRST)
258
+
259
+ # maximize scale s.t.
260
+ scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
261
+ num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
262
+ num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
263
+ resized_height = max(num_feasible_rows * patch_height, 1)
264
+ resized_width = max(num_feasible_cols * patch_width, 1)
265
+
266
+ image = torch.nn.functional.interpolate(
267
+ image.unsqueeze(0),
268
+ size=(resized_height, resized_width),
269
+ mode="bilinear",
270
+ align_corners=False,
271
+ antialias=True,
272
+ ).squeeze(0)
273
+
274
+ # [1, rows, columns, patch_height * patch_width * image_channels]
275
+ patches = torch_extract_patches(image, patch_height, patch_width)
276
+
277
+ patches_shape = patches.shape
278
+ rows = patches_shape[1]
279
+ columns = patches_shape[2]
280
+ depth = patches_shape[3]
281
+
282
+ # [rows * columns, patch_height * patch_width * image_channels]
283
+ patches = patches.reshape([rows * columns, depth])
284
+
285
+ # [rows * columns, 1]
286
+ row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
287
+ col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
288
+
289
+ # Offset by 1 so the ids do not contain zeros, which represent padding.
290
+ row_ids += 1
291
+ col_ids += 1
292
+
293
+ # Prepare additional patch features.
294
+ # [rows * columns, 1]
295
+ row_ids = row_ids.to(torch.float32)
296
+ col_ids = col_ids.to(torch.float32)
297
+
298
+ # [rows * columns, 2 + patch_height * patch_width * image_channels]
299
+ result = torch.cat([row_ids, col_ids, patches], -1)
300
+
301
+ # [max_patches, 2 + patch_height * patch_width * image_channels]
302
+ result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
303
+
304
+ result = to_numpy_array(result)
305
+
306
+ return result
307
+
308
+ def normalize(
309
+ self,
310
+ image: np.ndarray,
311
+ data_format: Optional[Union[str, ChannelDimension]] = None,
312
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
313
+ **kwargs,
314
+ ) -> np.ndarray:
315
+ """
316
+ Normalize an image. image = (image - image_mean) / image_std.
317
+
318
+ The image std is to mimic the tensorflow implementation of the `per_image_standardization`:
319
+ https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization
320
+
321
+ Args:
322
+ image (`np.ndarray`):
323
+ Image to normalize.
324
+ data_format (`str` or `ChannelDimension`, *optional*):
325
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
326
+ image is used.
327
+ input_data_format (`str` or `ChannelDimension`, *optional*):
328
+ The channel dimension format of the input image. If not provided, it will be inferred.
329
+ """
330
+ if image.dtype == np.uint8:
331
+ image = image.astype(np.float32)
332
+
333
+ # take mean across the whole `image`
334
+ mean = np.mean(image)
335
+ std = np.std(image)
336
+ adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
337
+
338
+ return normalize(
339
+ image,
340
+ mean=mean,
341
+ std=adjusted_stddev,
342
+ data_format=data_format,
343
+ input_data_format=input_data_format,
344
+ **kwargs,
345
+ )
346
+
347
+ def preprocess(
348
+ self,
349
+ images: ImageInput,
350
+ header_text: Optional[str] = None,
351
+ do_convert_rgb: bool = None,
352
+ do_normalize: Optional[bool] = None,
353
+ max_patches: Optional[int] = None,
354
+ patch_size: Optional[Dict[str, int]] = None,
355
+ return_tensors: Optional[Union[str, TensorType]] = None,
356
+ data_format: ChannelDimension = ChannelDimension.FIRST,
357
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
358
+ **kwargs,
359
+ ) -> ImageInput:
360
+ """
361
+ Preprocess an image or batch of images. The processor first computes the maximum possible number of
362
+ aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the
363
+ image with zeros to make the image respect the constraint of `max_patches`. Before extracting the patches the
364
+ images are standardized following the tensorflow implementation of `per_image_standardization`
365
+ (https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization).
366
+
367
+
368
+ Args:
369
+ images (`ImageInput`):
370
+ Image to preprocess. Expects a single or batch of images.
371
+ header_text (`Union[List[str], str]`, *optional*):
372
+ Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`.
373
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
374
+ Whether to convert the image to RGB.
375
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
376
+ Whether to normalize the image.
377
+ max_patches (`int`, *optional*, defaults to `self.max_patches`):
378
+ Maximum number of patches to extract.
379
+ patch_size (`dict`, *optional*, defaults to `self.patch_size`):
380
+ Dictionary containing the patch height and width.
381
+ return_tensors (`str` or `TensorType`, *optional*):
382
+ The type of tensors to return. Can be one of:
383
+ - Unset: Return a list of `np.ndarray`.
384
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
385
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
386
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
387
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
388
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
389
+ The channel dimension format for the output image. Can be one of:
390
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
391
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
392
+ - Unset: Use the channel dimension format of the input image.
393
+ input_data_format (`ChannelDimension` or `str`, *optional*):
394
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
395
+ from the input image. Can be one of:
396
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
397
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
398
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
399
+ """
400
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
401
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
402
+ patch_size = patch_size if patch_size is not None else self.patch_size
403
+ max_patches = max_patches if max_patches is not None else self.max_patches
404
+ is_vqa = self.is_vqa
405
+
406
+ if kwargs.get("data_format", None) is not None:
407
+ raise ValueError("data_format is not an accepted input as the outputs are ")
408
+
409
+ images = make_list_of_images(images)
410
+
411
+ if not valid_images(images):
412
+ raise ValueError(
413
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
414
+ "torch.Tensor, tf.Tensor or jax.ndarray."
415
+ )
416
+
417
+ # PIL RGBA images are converted to RGB
418
+ if do_convert_rgb:
419
+ images = [convert_to_rgb(image) for image in images]
420
+
421
+ # All transformations expect numpy arrays.
422
+ images = [to_numpy_array(image) for image in images]
423
+
424
+ if input_data_format is None:
425
+ # We assume that all images have the same channel dimension format.
426
+ input_data_format = infer_channel_dimension_format(images[0])
427
+
428
+ if is_vqa:
429
+ if header_text is None:
430
+ raise ValueError("A header text must be provided for VQA models.")
431
+ font_bytes = kwargs.pop("font_bytes", None)
432
+ font_path = kwargs.pop("font_path", None)
433
+
434
+ if isinstance(header_text, str):
435
+ header_text = [header_text] * len(images)
436
+
437
+ images = [
438
+ render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
439
+ for i, image in enumerate(images)
440
+ ]
441
+
442
+ if do_normalize:
443
+ images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]
444
+
445
+ # convert to torch tensor and permute
446
+ images = [
447
+ self.extract_flattened_patches(
448
+ image=image, max_patches=max_patches, patch_size=patch_size, input_data_format=input_data_format
449
+ )
450
+ for image in images
451
+ ]
452
+
453
+ # create attention mask in numpy
454
+ attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
455
+
456
+ encoded_outputs = BatchFeature(
457
+ data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
458
+ )
459
+
460
+ return encoded_outputs
evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/modeling_pix2struct.py ADDED
@@ -0,0 +1,1805 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. & Google team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Pix2Struct modeling file"""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutput,
27
+ BaseModelOutputWithPooling,
28
+ CausalLMOutputWithCrossAttentions,
29
+ Seq2SeqLMOutput,
30
+ Seq2SeqModelOutput,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS
34
+ from ...utils import (
35
+ DUMMY_INPUTS,
36
+ DUMMY_MASK,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_torch_fx_proxy,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_pix2struct import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
44
+
45
+
46
logger = logging.get_logger(__name__)

# General docstring
# Name of the config class, interpolated into the auto-generated docstrings below.
_CONFIG_FOR_DOC = "Pix2StructConfig"


# Checkpoints on the Hugging Face Hub that ship weights for this architecture.
# These are runtime strings (Hub repo ids) -- do not edit them cosmetically.
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/pix2struct-textcaps-base",
    "google/pix2struct-textcaps-large",
    "google/pix2struct-base",
    "google/pix2struct-large",
    "google/pix2struct-ai2d-base",
    "google/pix2struct-ai2d-large",
    "google/pix2struct-widget-captioning-base",
    "google/pix2struct-widget-captioning-large",
    "google/pix2struct-screen2words-base",
    "google/pix2struct-screen2words-large",
    "google/pix2struct-docvqa-base",
    "google/pix2struct-docvqa-large",
    "google/pix2struct-ocrvqa-base",
    "google/pix2struct-ocrvqa-large",
    "google/pix2struct-chartqa-base",
    # NOTE(review): "inforgraphics" looks misspelled ("infographics") -- confirm the
    # actual Hub repo ids before changing, since a "fix" would break lookups.
    "google/pix2struct-inforgraphics-vqa-base",
    "google/pix2struct-inforgraphics-vqa-large",
    # See all Pix2StructVision models at https://huggingface.co/models?filter=pix2struct
]
72
+
73
+
74
# Adapted from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pix2Struct
class Pix2StructLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        T5-style layer norm: scale-only RMS normalization, with no bias and no
        subtraction of the mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Root Mean Square Layer Normalization (https://arxiv.org/abs/1910.07467):
        # divide by the RMS of the last dimension only. The mean square is always
        # accumulated in fp32 so half-precision inputs stay numerically stable.
        mean_square = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(mean_square + self.variance_epsilon)

        # Cast back down when the module itself runs in half precision.
        if self.weight.dtype in (torch.float16, torch.bfloat16):
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
98
+
99
+
100
# Prefer apex's fused CUDA RMSNorm kernel when available: rebinding the class name
# here means every later `Pix2StructLayerNorm(...)` construction uses the fused
# variant (the signatures are compatible: (hidden_size, eps)).
try:
    from apex.normalization import FusedRMSNorm

    Pix2StructLayerNorm = FusedRMSNorm  # noqa

    logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pix2StructLayerNorm")
except ImportError:
    # apex is not installed: keep the pure-PyTorch Pix2StructLayerNorm defined above.
    pass
except Exception:
    # apex is present but failed to import (e.g. CUDA / toolchain mismatch) -- fall back.
    logger.warning("Discovered apex but it failed to load, falling back to Pix2StructLayerNorm")
    pass

# Register whichever implementation won; presumably consumed by framework utilities
# that special-case layer-norm modules (e.g. weight-decay grouping) -- TODO confirm.
ALL_LAYERNORM_LAYERS.append(Pix2StructLayerNorm)
114
+
115
+
116
class Pix2StructVisionEmbeddings(nn.Module):
    r"""
    Patch embedding layer for Pix2Struct. Unlike classic vision transformers, the input is
    a pre-flattened, padded sequence of `seq_len` patch vectors: the first two channels of
    each patch carry its (row, column) position on the image grid, and the remaining
    channels carry the flattened pixel values to be projected to `hidden_size`.
    """

    def __init__(self, config: Pix2StructConfig) -> None:
        super().__init__()
        # Projects the raw flattened patch pixels into the transformer hidden size.
        self.patch_projection = nn.Linear(config.patch_embed_hidden_size, config.hidden_size)

        # Learned 2-D positional embeddings, one lookup table per axis.
        self.row_embedder = nn.Embedding(config.seq_len, config.hidden_size)
        self.column_embedder = nn.Embedding(config.seq_len, config.hidden_size)

        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, flattened_patches: torch.Tensor) -> torch.Tensor:
        # flattened_patches: (batch_size, seq_len, hidden_size + 2); channels 0 and 1
        # hold the patch's row / column index, the rest hold pixel content.
        row_indices = flattened_patches[:, :, 0].long()
        col_indices = flattened_patches[:, :, 1].long()
        patch_values = flattened_patches[:, :, 2:]

        # Content projection plus both positional embeddings, then dropout.
        embeddings = (
            self.patch_projection(patch_values)
            + self.row_embedder(row_indices)
            + self.column_embedder(col_indices)
        )
        return self.dropout(embeddings)
150
+
151
+
152
class Pix2StructVisionAttention(nn.Module):
    """Multi-head self-attention of the Pix2Struct vision encoder.

    T5-style attention: scores are not divided by sqrt(head_dim) (the projection
    initialization compensates, see the Mesh TensorFlow comment below), and the
    attention mask is folded into an additive `position_bias` tensor rather than
    applied to the scores directly.
    """

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.key_value_proj_dim = config.d_kv  # per-head projection width
        self.n_heads = config.num_attention_heads
        self.dropout = config.attention_dropout
        self.inner_dim = self.n_heads * self.key_value_proj_dim

        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
        self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False)

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        output_attentions=False,
    ):
        """
        Self-attention block.

        Returns `(attn_output, position_bias)`, plus `attn_weights` when
        `output_attentions=True`. `position_bias` is returned so callers can reuse
        the already-folded mask on subsequent calls.
        """
        # Input is (batch_size, seq_length, dim)
        # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
        # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
        batch_size, seq_length = hidden_states.shape[:2]

        def to_projection_shape(states):
            """Reshape (batch, seq, inner_dim) into (batch, n_heads, seq, head_dim)."""
            return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)

        # get query states
        # (batch_size, n_heads, seq_length, dim_per_head)
        query_states = to_projection_shape(self.query(hidden_states))

        # get key/value states
        key_states = to_projection_shape(self.key(hidden_states))
        value_states = to_projection_shape(self.value(hidden_states))

        # compute scores
        # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
        scores = torch.matmul(query_states, key_states.transpose(3, 2))

        if position_bias is None:
            position_bias = torch.zeros(
                (1, self.n_heads, seq_length, seq_length), device=scores.device, dtype=scores.dtype
            )
            if self.gradient_checkpointing and self.training:
                position_bias.requires_grad = True

            if attention_mask is None:
                # No mask supplied: every position is attendable (value 1 = keep).
                attention_mask = torch.ones((batch_size, seq_length), device=scores.device, dtype=scores.dtype)

            if attention_mask.dim() == 2:
                # Broadcast a (batch, key_len) padding mask over heads and queries.
                position_bias = position_bias + attention_mask[:, None, None, :].to(position_bias.device)
            else:
                # (batch_size, n_heads, seq_length, key_length)
                position_bias = position_bias + attention_mask.to(position_bias.device)
            # Invert the 1=keep / 0=mask convention: after this line masked positions
            # hold 1 and kept positions hold 0, which the masked_fill below relies on.
            position_bias = 1 - position_bias

        # Kept positions contribute 0 to the scores; masked positions become the dtype
        # minimum so they vanish after the softmax.
        position_bias_masked = position_bias.masked_fill(position_bias == 1, torch.finfo(scores.dtype).min)
        scores += position_bias_masked
        # Floor the scores at the dtype minimum so the additive mask cannot underflow.
        scores = torch.max(scores, torch.tensor(torch.finfo(scores.dtype).min))

        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).type_as(scores)

        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask

        attn_output = torch.matmul(attn_weights, value_states)

        # (batch_size, seq_length, dim)
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)

        attn_output = self.output(attn_output)

        outputs = (attn_output,) + (position_bias,)

        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
244
+
245
+
246
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5DenseGatedActDense->Pix2StructVisionMlp,T5Config->Pix2StructVisionConfig,config.d_model->config.hidden_size,dropout_rate->dropout_rate
class Pix2StructVisionMlp(nn.Module):
    """Gated feed-forward block: act(wi_0(x)) * wi_1(x) -> dropout -> wo."""

    def __init__(self, config: Pix2StructVisionConfig):
        super().__init__()
        self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        # Activation chosen by config (config.dense_act_fn), applied to one branch only.
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        # Gated activation: the activated branch gates the linear branch elementwise.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
        # See https://github.com/huggingface/transformers/issues/20287
        # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
        if (
            isinstance(self.wo.weight, torch.Tensor)
            and hidden_states.dtype != self.wo.weight.dtype
            and self.wo.weight.dtype != torch.int8
        ):
            hidden_states = hidden_states.to(self.wo.weight.dtype)

        hidden_states = self.wo(hidden_states)
        return hidden_states
274
+
275
+
276
class Pix2StructVisionLayer(nn.Module):
    """One pre-norm transformer block of the vision encoder: attention then MLP."""

    def __init__(self, config: Pix2StructConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Pix2StructVisionAttention(config)
        self.mlp = Pix2StructVisionMlp(config)
        self.pre_mlp_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pre_attention_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # Pre-norm self-attention: layernorm is applied before attention, and the
        # un-normalized input is kept for the residual connection.
        normed_states = self.pre_attention_layer_norm(hidden_states)
        attention_outputs = self.attention(
            normed_states,
            attention_mask=attention_mask,
            layer_head_mask=head_mask,
            output_attentions=output_attentions,
        )

        # First residual connection.
        hidden_states = attention_outputs[0] + hidden_states

        # Pre-norm MLP with the second residual connection.
        mlp_input = self.pre_mlp_layer_norm(hidden_states)
        layer_output = self.mlp(mlp_input) + hidden_states

        # Propagate position bias (and attention weights, when requested) after the
        # layer output, matching the attention module's output layout.
        return (layer_output,) + attention_outputs[1:]
317
+
318
+
319
class Pix2StructVisionEncoder(nn.Module):
    """Stack of `Pix2StructVisionLayer`s with optional gradient checkpointing."""

    def __init__(self, config: Pix2StructConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Pix2StructVisionLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        """Run every layer in order, optionally collecting intermediate tensors."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for idx, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record each layer's input; the final output is appended after the loop.
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = None if head_mask is None else head_mask[idx]

            if self.gradient_checkpointing and self.training:
                # Trade compute for memory: re-run the layer during the backward pass.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                # NOTE(review): per the layer's output layout, layer_outputs[1] is the
                # position bias from the attention module; the attention probabilities
                # sit at index 2 -- confirm which tensor is meant to be surfaced here.
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            candidates = (hidden_states, all_hidden_states, all_self_attentions)
            return tuple(value for value in candidates if value is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
370
+
371
+
372
class Pix2StructPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Pix2StructConfig

    @property
    def dummy_inputs(self):
        # Minimal encoder/decoder inputs used for tracing and sanity checks.
        input_ids = torch.tensor(DUMMY_INPUTS)
        input_mask = torch.tensor(DUMMY_MASK)
        dummy_inputs = {
            "decoder_input_ids": input_ids,
            "input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return dummy_inputs

    def _init_weights(self, module):
        """Initialize the weights.

        Dispatches on the concrete module type; order matters because `isinstance`
        chains match the first compatible branch. Fix vs. earlier revision: two
        trailing `elif` branches re-checking `Pix2StructLayerNorm` and `nn.Embedding`
        were removed -- they were unreachable, being shadowed by the identical checks
        earlier in this chain.
        """
        factor = self.config.initializer_factor  # Used for testing weights initialization
        if isinstance(module, Pix2StructLayerNorm):
            # RMSNorm-style scale parameter; there is no bias to initialize.
            module.weight.data.fill_(factor * 1.0)
        elif isinstance(module, Pix2StructTextDenseGatedActDense):
            hidden_size = (
                self.config.text_config.hidden_size
                if isinstance(self.config, Pix2StructConfig)
                else self.config.hidden_size
            )
            d_ff = self.config.text_config.d_ff if isinstance(self.config, Pix2StructConfig) else self.config.d_ff

            module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
            if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
                module.wi_0.bias.data.zero_()
            module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
            if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
                module.wi_1.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, Pix2StructTextAttention):
            # Mesh TensorFlow attention initialization to avoid scaling before softmax
            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
            hidden_size = (
                self.config.text_config.hidden_size
                if isinstance(self.config, Pix2StructConfig)
                else self.config.hidden_size
            )
            key_value_proj_dim = (
                self.config.text_config.d_kv if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size
            )
            n_heads = (
                self.config.text_config.num_heads
                if isinstance(self.config, Pix2StructConfig)
                else self.config.num_heads
            )

            module.query.weight.data.normal_(mean=0.0, std=factor * ((hidden_size * key_value_proj_dim) ** -0.5))
            module.key.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
            module.value.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
            module.output.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
            if module.has_relative_attention_bias:
                module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
        elif isinstance(module, nn.Embedding):
            hidden_size = (
                self.config.text_config.hidden_size
                if isinstance(self.config, Pix2StructConfig)
                else self.config.hidden_size
            )

            module.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Pix2StructTextModel):
            hidden_size = (
                self.config.text_config.hidden_size
                if isinstance(self.config, Pix2StructConfig)
                else self.config.hidden_size
            )

            module.lm_head.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
            # `trunc_normal_cpu` not implemented in `half` issues
            module.weight.data = nn.init.trunc_normal_(
                module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
            ).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()

    # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->Pix2Struct
    def _shift_right(self, input_ids):
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id

        if decoder_start_token_id is None:
            raise ValueError(
                "self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id. "
                "See Pix2Struct docs for more information."
            )

        # shift inputs to the right
        if is_torch_fx_proxy(input_ids):
            # Item assignment is not supported natively for proxies.
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = decoder_start_token_id

        if pad_token_id is None:
            raise ValueError("self.model.config.pad_token_id has to be defined.")
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

        return shifted_input_ids
497
+
498
+
499
PIX2STRUCT_VISION_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`Pix2StructConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Fix vs. earlier revision: the `attention_mask` entry ended with a dangling
# "Mask values selected in `[0, 1]`:" with no value list; the standard bullets
# are now filled in.
PIX2STRUCT_VISION_INPUTS_DOCSTRING = r"""
    Args:
        flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`):
            Flattened and padded pixel values. These values can be obtained using [`AutoImageProcessor`]. See
            [`Pix2StructVisionImageProcessor.__call__`] for details. Check the [original
            paper](https://arxiv.org/abs/2210.03347) (figure 5) for more details.

        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:

            - 1 for patches that are **not masked**,
            - 0 for patches that are **masked** (i.e. padding).

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
535
+
536
+
537
@add_start_docstrings(
    "The bare Pix2StructVision Model transformer outputting raw hidden-states without any specific head on top.",
    PIX2STRUCT_VISION_START_DOCSTRING,
)
class Pix2StructVisionModel(Pix2StructPreTrainedModel):
    # This sub-model is configured/loaded with the vision-only config.
    config_class = Pix2StructVisionConfig
    main_input_name = "flattened_patches"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Pix2StructVisionLayer"]

    def __init__(self, config: Pix2StructConfig):
        super().__init__(config)
        self.config = config

        # Patch + positional embeddings, transformer stack, and a final RMS norm.
        self.embeddings = Pix2StructVisionEmbeddings(config)
        self.encoder = Pix2StructVisionEncoder(config)

        self.layernorm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # The patch projection plays the role of the input embedding layer.
        return self.embeddings.patch_projection

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # NOTE(review): `Pix2StructVisionAttention` in this file does not define a
        # `prune_heads` method, so calling this would raise AttributeError -- confirm.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(PIX2STRUCT_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        flattened_patches: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Example:

        ```python
        >>> import requests
        >>> from PIL import Image
        >>> from transformers import AutoProcessor, Pix2StructVisionModel

        >>> image_processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
        >>> model = Pix2StructVisionModel.from_pretrained("google/pix2struct-textcaps-base")

        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 2048, 768]
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if flattened_patches is None:
            raise ValueError("You have to specify flattened_patches")

        if attention_mask is None:
            # check where `flattened_patches` is not 0 (all-zero patches are padding)
            attention_mask = (flattened_patches.sum(dim=-1) != 0).float()

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(flattened_patches)

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # Final RMS layer norm over the encoder output.
        sequence_output = self.layernorm(sequence_output)

        if not return_dict:
            head_outputs = (sequence_output,)
            return head_outputs + encoder_outputs[1:]

        # NOTE(review): the decorator/annotation advertise `BaseModelOutputWithPooling`,
        # but a plain `BaseModelOutput` (no pooled output) is constructed here -- confirm
        # which return type is intended.
        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
648
+
649
+
650
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pix2StructText,d_model->hidden_size
class Pix2StructTextDenseGatedActDense(nn.Module):
    """Gated feed-forward block of the text decoder: act(wi_0(x)) * wi_1(x) -> dropout -> wo."""

    def __init__(self, config: Pix2StructTextConfig):
        super().__init__()
        self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        # Activation chosen by config (config.dense_act_fn), applied to one branch only.
        self.act = ACT2FN[config.dense_act_fn]

    def forward(self, hidden_states):
        # Gated activation: the activated branch gates the linear branch elementwise.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
        # See https://github.com/huggingface/transformers/issues/20287
        # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
        if (
            isinstance(self.wo.weight, torch.Tensor)
            and hidden_states.dtype != self.wo.weight.dtype
            and self.wo.weight.dtype != torch.int8
        ):
            hidden_states = hidden_states.to(self.wo.weight.dtype)

        hidden_states = self.wo(hidden_states)
        return hidden_states
678
+
679
+
680
class Pix2StructTextLayerFF(nn.Module):
    """Pre-norm feed-forward sub-layer with a residual connection around the gated MLP."""

    def __init__(self, config: Pix2StructTextConfig):
        super().__init__()
        self.DenseReluDense = Pix2StructTextDenseGatedActDense(config)

        self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        # LayerNorm -> gated feed-forward -> dropout, added back onto the residual stream.
        residual = hidden_states
        ff_output = self.DenseReluDense(self.layer_norm(hidden_states))
        return residual + self.dropout(ff_output)
694
+
695
+
696
class Pix2StructTextAttention(nn.Module):
    """
    Multi-head attention for the Pix2Struct text decoder, adapted from T5's attention.

    Handles both self-attention (`key_value_states is None`) and cross-attention over
    the encoder output, an optional T5-style relative position bias (instantiated only
    when `has_relative_attention_bias` is True), and incremental decoding through
    `past_key_value`.
    """

    def __init__(self, config: Pix2StructTextConfig, has_relative_attention_bias=False):
        super().__init__()
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.hidden_size = config.hidden_size
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.dropout = config.dropout_rate
        # Total projection width across all heads.
        self.inner_dim = self.n_heads * self.key_value_proj_dim

        # Mesh TensorFlow initialization to avoid scaling before softmax
        self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.key = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.value = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.output = nn.Linear(self.hidden_size, self.hidden_size, bias=False)

        if self.has_relative_attention_bias:
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.pruned_heads = set()
        self.gradient_checkpointing = False

    @staticmethod
    # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593

        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on

        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer

        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            # Split the bucket range: one half for positive offsets, one for negative.
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)

        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact

        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        # Clamp so positions beyond max_distance all land in the last bucket.
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )

        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
        return relative_buckets

    # Adapted from transformers.models.t5.modeling_t5.T5Attention.compute_bias
    def compute_bias(self, query_length, key_length, device=None):
        """Compute binned relative position bias of shape (1, n_heads, query_length, key_length)."""
        if device is None:
            device = self.relative_attention_bias.weight.device
        context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
        memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
        relative_position = memory_position - context_position  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,  # shape (query_length, key_length)
            bidirectional=False,
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        values = self.relative_attention_bias(relative_position_bucket)  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, query_length, key_length)
        return values

    def forward(
        self,
        hidden_states,
        mask=None,
        key_value_states=None,
        position_bias=None,
        past_key_value=None,
        layer_head_mask=None,
        query_length=None,
        use_cache=False,
        output_attentions=False,
    ):
        """
        Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).

        Returns a tuple `(attn_output, present_key_value_state, position_bias[, attn_weights])`
        where `present_key_value_state` is None unless `use_cache` is True and
        `attn_weights` is appended only when `output_attentions` is True.
        """
        # Input is (batch_size, seq_length, dim)
        # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
        # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
        batch_size, seq_length = hidden_states.shape[:2]

        real_seq_length = seq_length

        if past_key_value is not None:
            if len(past_key_value) != 2:
                raise ValueError(
                    f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
                )
            # During incremental decoding the full query length is the cached length
            # plus the new tokens (or the caller-provided `query_length` for cross-attn).
            real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length

        key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]

        def to_projection_shape(states):
            """Reshape (batch, seq, inner_dim) -> (batch, n_heads, seq, dim_per_head)."""
            return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)

        def project(hidden_states, proj_layer, key_value_states, past_key_value):
            """projects hidden states correctly to key/query states"""
            if key_value_states is None:
                # self-attn
                # (batch_size, n_heads, seq_length, dim_per_head)
                hidden_states = to_projection_shape(proj_layer(hidden_states))
            elif past_key_value is None:
                # cross-attn
                # (batch_size, n_heads, seq_length, dim_per_head)
                hidden_states = to_projection_shape(proj_layer(key_value_states))

            if past_key_value is not None:
                if key_value_states is None:
                    # self-attn
                    # (batch_size, n_heads, key_length, dim_per_head)
                    hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
                elif past_key_value.shape[2] != key_value_states.shape[1]:
                    # checking that the `sequence_length` of the `past_key_value` is the same as
                    # the provided `key_value_states` to support prefix tuning
                    # cross-attn
                    # (batch_size, n_heads, seq_length, dim_per_head)
                    hidden_states = to_projection_shape(proj_layer(key_value_states))
                else:
                    # cross-attn
                    hidden_states = past_key_value
            return hidden_states

        # get query states
        # (batch_size, n_heads, seq_length, dim_per_head)
        query_states = to_projection_shape(self.query(hidden_states))

        # get key/value states
        key_states = project(
            hidden_states, self.key, key_value_states, past_key_value[0] if past_key_value is not None else None
        )
        value_states = project(
            hidden_states, self.value, key_value_states, past_key_value[1] if past_key_value is not None else None
        )

        # compute scores
        scores = torch.matmul(
            query_states, key_states.transpose(3, 2)
        )  # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9

        if position_bias is None:
            if not self.has_relative_attention_bias:
                # Layers without a bias table contribute a zero bias (the mask is
                # still folded into it below).
                position_bias = torch.zeros(
                    (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
                )
                if self.gradient_checkpointing and self.training:
                    position_bias.requires_grad = True
            else:
                position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)

            # if key and values are already calculated
            # we want only the last query position bias
            if past_key_value is not None:
                position_bias = position_bias[:, :, -hidden_states.size(1) :, :]

            if mask is not None:
                position_bias = position_bias + mask  # (batch_size, n_heads, seq_length, key_length)

        if self.pruned_heads:
            # Drop bias rows for pruned heads so shapes match the remaining heads.
            mask = torch.ones(position_bias.shape[1])
            mask[list(self.pruned_heads)] = 0
            position_bias_masked = position_bias[:, mask.bool()]
        else:
            position_bias_masked = position_bias

        scores += position_bias_masked
        # (batch_size, n_heads, seq_length, key_length)
        # Softmax in float32 for numerical stability, then cast back.
        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)

        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask

        attn_output = torch.matmul(attn_weights, value_states)
        # (batch_size, seq_length, dim)
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)

        attn_output = self.output(attn_output)

        present_key_value_state = (key_states, value_states) if use_cache else None
        outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)

        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
913
+
914
+
915
# Equivalent to transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.SelfAttention->self.attention,config.d_model->config.hidden_size
class Pix2StructTextLayerSelfAttention(nn.Module):
    """Pre-norm self-attention sub-layer with a residual connection."""

    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=has_relative_attention_bias)
        self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
    ):
        # Normalize first (pre-norm), attend, then add the residual.
        attn_results = self.attention(
            self.layer_norm(hidden_states),
            mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        residual_out = hidden_states + self.dropout(attn_results[0])
        # Forward the cache / position-bias / attention extras unchanged.
        return (residual_out,) + attn_results[1:]
946
+
947
+
948
# Equivalent to transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.EncDecAttention->self.attention,config.d_model->config.hidden_size
class Pix2StructTextLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention sub-layer (decoder attends to encoder states) with a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=False)
        self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        query_length=None,
        output_attentions=False,
    ):
        # Normalize first (pre-norm), attend over the encoder states, then add the residual.
        attn_results = self.attention(
            self.layer_norm(hidden_states),
            mask=attention_mask,
            key_value_states=key_value_states,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            query_length=query_length,
            output_attentions=output_attentions,
        )
        residual_out = hidden_states + self.dropout(attn_results[0])
        # Forward the cache / position-bias / attention extras unchanged.
        return (residual_out,) + attn_results[1:]
983
+
984
+
985
class Pix2StructTextBlock(nn.Module):
    """
    One Pix2Struct text-decoder layer: self-attention, cross-attention over the
    encoder output, and a gated feed-forward MLP. Each sub-module applies its own
    pre-LayerNorm and residual connection internally.
    """

    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()

        # Only the first decoder block is constructed with a relative attention bias
        # table; later blocks reuse the bias passed in through `position_bias`.
        self.self_attention = Pix2StructTextLayerSelfAttention(
            config, has_relative_attention_bias=has_relative_attention_bias
        )

        self.encoder_decoder_attention = Pix2StructTextLayerCrossAttention(config)

        self.mlp = Pix2StructTextLayerFF(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
        layer_head_mask=None,
        cross_attn_layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
        return_dict=True,  # NOTE(review): accepted but not used in this body
    ):
        # The cache for one layer holds 2 tensors (self-attn K/V) or 4 tensors
        # (self-attn K/V + cross-attn K/V) — split it for the two attention modules.
        if past_key_value is not None:
            expected_num_past_key_values = 2 if encoder_hidden_states is None else 4

            if len(past_key_value) != expected_num_past_key_values:
                raise ValueError(
                    f"There should be {expected_num_past_key_values} past states. "
                    f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
                    f"Got {len(past_key_value)} past key / value states"
                )

            self_attn_past_key_value = past_key_value[:2]
            cross_attn_past_key_value = past_key_value[2:]
        else:
            self_attn_past_key_value, cross_attn_past_key_value = None, None

        self_attention_outputs = self.self_attention(
            hidden_states,
            attention_mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=self_attn_past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states, present_key_value_state = self_attention_outputs[:2]
        attention_outputs = self_attention_outputs[2:]  # Keep self-attention outputs and relative position weights

        # clamp inf values to enable fp16 training
        if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        do_cross_attention = encoder_hidden_states is not None
        if do_cross_attention:
            # the actual query length is unknown for cross attention
            # if using past key value states. Need to inject it here
            if present_key_value_state is not None:
                query_length = present_key_value_state[0].shape[2]
            else:
                query_length = None

            cross_attention_outputs = self.encoder_decoder_attention(
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                position_bias=encoder_decoder_position_bias,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                query_length=query_length,
                use_cache=use_cache,
                output_attentions=output_attentions,
            )
            hidden_states = cross_attention_outputs[0]

            # clamp inf values to enable fp16 training
            if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
                clamp_value = torch.finfo(hidden_states.dtype).max - 1000
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

            # Combine self attn and cross attn key value states
            if present_key_value_state is not None:
                present_key_value_state = present_key_value_state + cross_attention_outputs[1]

            # Keep cross-attention outputs and relative position weights
            attention_outputs = attention_outputs + cross_attention_outputs[2:]

        # Apply Feed Forward layer
        hidden_states = self.mlp(hidden_states)

        # clamp inf values to enable fp16 training
        if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        # Tuple layout: (hidden_states[, present_key_value_state], self-attn bias,
        # [self-attn weights,] [cross-attn bias, [cross-attn weights]]).
        if use_cache:
            outputs = outputs + (present_key_value_state,) + attention_outputs
        else:
            outputs = outputs + attention_outputs

        return outputs
1094
+
1095
+
1096
+ PIX2STRUCT_START_DOCSTRING = r"""
1097
+
1098
+ The Pix2Struct model was proposed in [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language
1099
+ Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu,
1100
+ Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. It's an encoder decoder
1101
+ transformer pre-trained in a image-to-text setting.
1102
+
1103
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1104
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1105
+ etc.)
1106
+
1107
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1108
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
1109
+ and behavior.
1110
+
1111
+ Parameters:
1112
+ config (Union[`Pix2StructConfig`, `Pix2StructTextConfig`]):
1113
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1114
+ load the weights associated with the model, only the configuration. Check out the
1115
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1116
+ """
1117
+
1118
+ PIX2STRUCT_TEXT_INPUTS_DOCSTRING = r"""
1119
+ Args:
1120
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1121
+ Indices of input sequence tokens in the vocabulary. Pix2StructText is a model with relative position
1122
+ embeddings so you should be able to pad the inputs on both the right and the left.
1123
+
1124
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1125
+ [`PreTrainedTokenizer.__call__`] for detail.
1126
+
1127
+ [What are input IDs?](../glossary#input-ids)
1128
+
1129
+ To know more on how to prepare `input_ids` for pretraining take a look a [Pix2StructText
1130
+ Training](./t5#training).
1131
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1132
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1133
+
1134
+ - 1 for tokens that are **not masked**,
1135
+ - 0 for tokens that are **masked**.
1136
+
1137
+ [What are attention masks?](../glossary#attention-mask)
1138
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1139
+ Indices of decoder input sequence tokens in the vocabulary.
1140
+
1141
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1142
+ [`PreTrainedTokenizer.__call__`] for details.
1143
+
1144
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
1145
+
1146
+ Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
1147
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1148
+ `past_key_values`).
1149
+
1150
+ To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText
1151
+ Training](./t5#training).
1152
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1153
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
1154
+ be used by default.
1155
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1156
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1157
+ 1]`:
1158
+
1159
+ - 1 indicates the head is **not masked**,
1160
+ - 0 indicates the head is **masked**.
1161
+
1162
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1163
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1164
+ 1]`:
1165
+
1166
+ - 1 indicates the head is **not masked**,
1167
+ - 0 indicates the head is **masked**.
1168
+
1169
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1170
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
1171
+ `[0, 1]`:
1172
+
1173
+ - 1 indicates the head is **not masked**,
1174
+ - 0 indicates the head is **masked**.
1175
+
1176
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
1177
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
1178
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
1179
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
1180
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1181
+ Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding.
1182
+
1183
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1184
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1185
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1186
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1187
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1188
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1189
+ model's internal embedding lookup matrix.
1190
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
1191
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
1192
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
1193
+ input (see `past_key_values`). This is useful if you want more control over how to convert
1194
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
1195
+
1196
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
1197
+ of `inputs_embeds`.
1198
+
1199
+ use_cache (`bool`, *optional*):
1200
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1201
+ `past_key_values`).
1202
+
1203
+ output_attentions (`bool`, *optional*):
1204
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1205
+ tensors for more detail.
1206
+ output_hidden_states (`bool`, *optional*):
1207
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1208
+ more detail.
1209
+ return_dict (`bool`, *optional*):
1210
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1211
+ """
1212
+
1213
+ PIX2STRUCT_INPUTS_DOCSTRING = r"""
1214
+ Args:
1215
+ flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`):
1216
+ Flattened pixel patches. the `hidden_size` is obtained by the following formula: `hidden_size` =
1217
+ `num_channels` * `patch_size` * `patch_size`
1218
+
1219
+ The process of flattening the pixel patches is done by `Pix2StructProcessor`.
1220
+
1221
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1222
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1223
+
1224
+ - 1 for tokens that are **not masked**,
1225
+ - 0 for tokens that are **masked**.
1226
+
1227
+ [What are attention masks?](../glossary#attention-mask)
1228
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1229
+ Indices of decoder input sequence tokens in the vocabulary.
1230
+
1231
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1232
+ [`PreTrainedTokenizer.__call__`] for details.
1233
+
1234
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
1235
+
1236
+ Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
1237
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1238
+ `past_key_values`).
1239
+
1240
+ To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText
1241
+ Training](./t5#training).
1242
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1243
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
1244
+ be used by default.
1245
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1246
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1247
+ 1]`:
1248
+
1249
+ - 1 indicates the head is **not masked**,
1250
+ - 0 indicates the head is **masked**.
1251
+
1252
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1253
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1254
+ 1]`:
1255
+
1256
+ - 1 indicates the head is **not masked**,
1257
+ - 0 indicates the head is **masked**.
1258
+
1259
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1260
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
1261
+ `[0, 1]`:
1262
+
1263
+ - 1 indicates the head is **not masked**,
1264
+ - 0 indicates the head is **masked**.
1265
+
1266
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
1267
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
1268
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
1269
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
1270
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1271
+ Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding.
1272
+
1273
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1274
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1275
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1276
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
1277
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
1278
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
1279
+ input (see `past_key_values`). This is useful if you want more control over how to convert
1280
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
1281
+
1282
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
1283
+ of `inputs_embeds`.
1284
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1285
+ Labels for computing the masked language modeling loss for the decoder.
1286
+
1287
+ use_cache (`bool`, *optional*):
1288
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1289
+ `past_key_values`).
1290
+
1291
+ output_attentions (`bool`, *optional*):
1292
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1293
+ tensors for more detail.
1294
+ output_hidden_states (`bool`, *optional*):
1295
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1296
+ more detail.
1297
+ return_dict (`bool`, *optional*):
1298
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1299
+ """
1300
+
1301
+
1302
+ @add_start_docstrings(
1303
+ "The standalone text decoder of Pix2Struct",
1304
+ PIX2STRUCT_START_DOCSTRING,
1305
+ )
1306
+ class Pix2StructTextModel(Pix2StructPreTrainedModel):
1307
+ config_class = Pix2StructTextConfig
1308
+ _no_split_modules = ["Pix2StructTextBlock"]
1309
+ _tied_weights_keys = ["lm_head.weight"]
1310
+ supports_gradient_checkpointing = True
1311
+
1312
    def __init__(self, config):
        """Build the standalone Pix2Struct text decoder from `config`."""
        super().__init__(config)
        # Token embedding table; its weight is tied to `lm_head` via the
        # class-level `_tied_weights_keys`.
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)

        # Only the first block owns a relative attention bias table; later
        # blocks receive the bias through `position_bias` at forward time.
        self.layer = nn.ModuleList(
            [Pix2StructTextBlock(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
        )
        self.final_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()
        self.gradient_checkpointing = False
1327
+
1328
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._reorder_cache
1329
+ def _reorder_cache(self, past_key_values, beam_idx):
1330
+ # if decoder past is not included in output
1331
+ # speedy decoding is disabled and no need to reorder
1332
+ if past_key_values is None:
1333
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
1334
+ return past_key_values
1335
+
1336
+ reordered_decoder_past = ()
1337
+ for layer_past_states in past_key_values:
1338
+ # get the correct batch idx from layer past batch dim
1339
+ # batch dim of `past` is at 2nd position
1340
+ reordered_layer_past_states = ()
1341
+ for layer_past_state in layer_past_states:
1342
+ # need to set correct `past` for each of the four key / value states
1343
+ reordered_layer_past_states = reordered_layer_past_states + (
1344
+ layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
1345
+ )
1346
+
1347
+ if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
1348
+ raise ValueError(
1349
+ f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched"
1350
+ )
1351
+ if len(reordered_layer_past_states) != len(layer_past_states):
1352
+ raise ValueError(
1353
+ f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched"
1354
+ )
1355
+
1356
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
1357
+ return reordered_decoder_past
1358
+
1359
+ def get_input_embeddings(self):
1360
+ return self.embed_tokens
1361
+
1362
+ def set_input_embeddings(self, new_embeddings):
1363
+ self.embed_tokens = new_embeddings
1364
+
1365
+ def get_output_embeddings(self):
1366
+ return self.lm_head
1367
+
1368
+ def set_output_embeddings(self, new_embeddings):
1369
+ self.lm_head = new_embeddings
1370
+
1371
+ @add_start_docstrings_to_model_forward(PIX2STRUCT_TEXT_INPUTS_DOCSTRING)
1372
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1373
+ def forward(
1374
+ self,
1375
+ input_ids: Optional[torch.LongTensor] = None,
1376
+ attention_mask: Optional[torch.FloatTensor] = None,
1377
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1378
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1379
+ inputs_embeds: Optional[torch.LongTensor] = None,
1380
+ head_mask: Optional[torch.FloatTensor] = None,
1381
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1382
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1383
+ use_cache: Optional[bool] = None,
1384
+ output_attentions: Optional[bool] = None,
1385
+ output_hidden_states: Optional[bool] = None,
1386
+ labels: Optional[torch.LongTensor] = None,
1387
+ return_dict: Optional[bool] = None,
1388
+ **kwargs,
1389
+ ) -> Union[Tuple[torch.FloatTensor, ...], CausalLMOutputWithCrossAttentions]:
1390
+ r"""
1391
+ Returns:
1392
+
1393
+ Example:
1394
+
1395
+ ```python
1396
+ >>> from transformers import AutoProcessor, Pix2StructTextModel
1397
+
1398
+ >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
1399
+ >>> model = Pix2StructTextModel.from_pretrained("google/pix2struct-textcaps-base")
1400
+
1401
+ >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
1402
+ >>> outputs = model(**inputs)
1403
+ >>> loss = outputs.loss
1404
+ ```
1405
+ """
1406
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1407
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1408
+ output_hidden_states = (
1409
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1410
+ )
1411
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1412
+
1413
+ if input_ids is not None and inputs_embeds is not None:
1414
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1415
+ elif input_ids is not None:
1416
+ input_shape = input_ids.size()
1417
+ input_ids = input_ids.view(-1, input_shape[-1])
1418
+ elif inputs_embeds is not None:
1419
+ input_shape = inputs_embeds.size()[:-1]
1420
+ else:
1421
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1422
+
1423
+ if inputs_embeds is None:
1424
+ assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
1425
+ inputs_embeds = self.embed_tokens(input_ids)
1426
+
1427
+ batch_size, seq_length = input_shape
1428
+
1429
+ # required mask seq length can be calculated via length of past
1430
+ mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
1431
+
1432
+ if attention_mask is None:
1433
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
1434
+ if encoder_attention_mask is None and encoder_hidden_states is not None:
1435
+ encoder_seq_length = encoder_hidden_states.shape[1]
1436
+ encoder_attention_mask = torch.ones(
1437
+ batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
1438
+ )
1439
+
1440
+ # initialize past_key_values with `None` if past does not exist
1441
+ if past_key_values is None:
1442
+ past_key_values = [None] * len(self.layer)
1443
+
1444
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1445
+ # ourselves in which case we just need to make it broadcastable to all heads.
1446
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
1447
+
1448
+ # If a 2D or 3D attention mask is provided for the cross-attention
1449
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1450
+ if encoder_hidden_states is not None:
1451
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1452
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1453
+ if encoder_attention_mask is None:
1454
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
1455
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1456
+ else:
1457
+ encoder_extended_attention_mask = None
1458
+
1459
+ # Prepare head mask if needed
1460
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
1461
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
1462
+ present_key_value_states = () if use_cache else None
1463
+ all_hidden_states = () if output_hidden_states else None
1464
+ all_attentions = () if output_attentions else None
1465
+ all_cross_attentions = () if (output_attentions) else None
1466
+ position_bias = None
1467
+ encoder_decoder_position_bias = None
1468
+
1469
+ hidden_states = self.dropout(inputs_embeds)
1470
+
1471
+ for i, (layer_module, past_key_value) in enumerate(zip(self.layer, past_key_values)):
1472
+ layer_head_mask = head_mask[i]
1473
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
1474
+ if output_hidden_states:
1475
+ all_hidden_states = all_hidden_states + (hidden_states,)
1476
+
1477
+ if self.gradient_checkpointing and self.training:
1478
+ if use_cache:
1479
+ logger.warning(
1480
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1481
+ )
1482
+ use_cache = False
1483
+ layer_outputs = self._gradient_checkpointing_func(
1484
+ layer_module.forward,
1485
+ hidden_states,
1486
+ extended_attention_mask,
1487
+ position_bias,
1488
+ encoder_hidden_states,
1489
+ encoder_extended_attention_mask,
1490
+ encoder_decoder_position_bias,
1491
+ layer_head_mask,
1492
+ cross_attn_layer_head_mask,
1493
+ None, # past_key_value is always None with gradient checkpointing
1494
+ use_cache,
1495
+ output_attentions,
1496
+ )
1497
+ else:
1498
+ layer_outputs = layer_module(
1499
+ hidden_states,
1500
+ attention_mask=extended_attention_mask,
1501
+ position_bias=position_bias,
1502
+ encoder_hidden_states=encoder_hidden_states,
1503
+ encoder_attention_mask=encoder_extended_attention_mask,
1504
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
1505
+ layer_head_mask=layer_head_mask,
1506
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
1507
+ past_key_value=past_key_value,
1508
+ use_cache=use_cache,
1509
+ output_attentions=output_attentions,
1510
+ )
1511
+
1512
+ # layer_outputs is a tuple with:
1513
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
1514
+ if use_cache is False:
1515
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
1516
+
1517
+ hidden_states, present_key_value_state = layer_outputs[:2]
1518
+
1519
+ # We share the position biases between the layers - the first layer store them
1520
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
1521
+ # (cross-attention position bias), (cross-attention weights)
1522
+ position_bias = layer_outputs[2]
1523
+ if encoder_hidden_states is not None:
1524
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
1525
+ # append next layer key value states
1526
+ if use_cache:
1527
+ present_key_value_states = present_key_value_states + (present_key_value_state,)
1528
+
1529
+ if output_attentions:
1530
+ all_attentions = all_attentions + (layer_outputs[3],)
1531
+ if encoder_hidden_states is not None:
1532
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
1533
+
1534
+ hidden_states = self.final_layer_norm(hidden_states)
1535
+ hidden_states = self.dropout(hidden_states)
1536
+
1537
+ logits = self.lm_head(hidden_states)
1538
+
1539
+ # Add last layer
1540
+ if output_hidden_states:
1541
+ all_hidden_states = all_hidden_states + (hidden_states,)
1542
+
1543
+ loss = None
1544
+ if labels is not None:
1545
+ # move labels to correct device to enable model parallelism
1546
+ labels = labels.to(logits.device)
1547
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="mean")
1548
+
1549
+ loss = loss_fct(logits.contiguous().view(-1, logits.size(-1)), labels.contiguous().view(-1))
1550
+
1551
+ if not return_dict:
1552
+ return tuple(
1553
+ v
1554
+ for v in [
1555
+ loss,
1556
+ logits,
1557
+ present_key_value_states,
1558
+ all_hidden_states,
1559
+ all_attentions,
1560
+ all_cross_attentions,
1561
+ ]
1562
+ if v is not None
1563
+ )
1564
+ return CausalLMOutputWithCrossAttentions(
1565
+ loss=loss,
1566
+ logits=logits,
1567
+ past_key_values=present_key_value_states,
1568
+ hidden_states=all_hidden_states,
1569
+ attentions=all_attentions,
1570
+ cross_attentions=all_cross_attentions,
1571
+ )
1572
+
1573
+
1574
@add_start_docstrings(
    "A conditional generation model with a language modeling head. Can be used for sequence generation tasks.",
    PIX2STRUCT_START_DOCSTRING,
)
class Pix2StructForConditionalGeneration(Pix2StructPreTrainedModel):
    # Encoder-decoder wrapper: a Pix2StructVisionModel encodes flattened image
    # patches, and a Pix2StructTextModel decodes text conditioned on them.
    config_class = Pix2StructConfig
    main_input_name = "flattened_patches"
    _tied_weights_keys = ["decoder.lm_head.weight"]

    def __init__(self, config: Pix2StructConfig):
        """Build the vision encoder and text decoder from a composite config."""
        super().__init__(config)

        self.encoder = Pix2StructVisionModel(config.vision_config)
        self.decoder = Pix2StructTextModel(config.text_config)

        self.is_vqa = config.is_vqa

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Input embeddings live on the text decoder (the encoder consumes patches).
        return self.decoder.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.decoder.set_input_embeddings(new_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.decoder.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.decoder.set_output_embeddings(new_embeddings)

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        """Resize the decoder token embeddings and keep the text config in sync."""
        model_embeds = self.decoder.resize_token_embeddings(new_num_tokens)

        # update vocab size
        # NOTE(review): this assigns `new_num_tokens` even when it is None — confirm
        # callers never pass None expecting the config to stay unchanged.
        self.config.text_config.vocab_size = new_num_tokens

        return model_embeds

    def get_decoder(self):
        return self.decoder

    def get_encoder(self):
        return self.encoder

    @add_start_docstrings_to_model_forward(PIX2STRUCT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        flattened_patches: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        decoder_inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        r"""
        Returns:

        Example:

        Inference:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration

        >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
        >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")

        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> # autoregressive generation
        >>> generated_ids = model.generate(**inputs, max_new_tokens=50)
        >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> print(generated_text)
        A stop sign is on a street corner.

        >>> # conditional generation
        >>> text = "A picture of"
        >>> inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False)

        >>> generated_ids = model.generate(**inputs, max_new_tokens=50)
        >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> print(generated_text)
        A picture of a stop sign with a red stop sign
        ```

        Training:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration

        >>> processor = AutoProcessor.from_pretrained("google/pix2struct-base")
        >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-base")

        >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "A stop sign is on the street corner."

        >>> inputs = processor(images=image, return_tensors="pt")
        >>> labels = processor(text=text, return_tensors="pt").input_ids

        >>> # forward pass
        >>> outputs = model(**inputs, labels=labels)
        >>> loss = outputs.loss
        >>> print(f"{loss.item():.5f}")
        5.94282
        ```"""
        use_cache = use_cache if use_cache is not None else self.config.text_config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                flattened_patches=flattened_patches,
                attention_mask=attention_mask,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            # Wrap a plain tuple of encoder outputs into the dataclass for uniform access.
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        hidden_states = encoder_outputs[0]

        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)
            decoder_attention_mask = (
                decoder_attention_mask
                if decoder_attention_mask is not None
                else decoder_input_ids.ne(self.config.pad_token_id).float()
            )
            # Always attend to the first token
            decoder_attention_mask[:, 0] = 1

        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            labels=labels,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqLMOutput(
            loss=decoder_outputs.loss,
            logits=decoder_outputs.logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        flattened_patches: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        past_key_values=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        """Assemble the keyword arguments for one `generate()` decoding step."""
        if decoder_attention_mask is None:
            decoder_attention_mask = torch.ones_like(input_ids).to(input_ids.device)

        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        return {
            "flattened_patches": flattened_patches,
            "decoder_input_ids": input_ids,
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,
        }
evalkit_internvl/lib/python3.10/site-packages/transformers/models/pix2struct/processing_pix2struct.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Pix2Struct.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
23
+ from ...utils import TensorType
24
+
25
+
26
class Pix2StructProcessor(ProcessorMixin):
    r"""
    Constructs a PIX2STRUCT processor which wraps a BERT tokenizer and PIX2STRUCT image processor into a single
    processor.

    [`Pix2StructProcessor`] offers all the functionalities of [`Pix2StructImageProcessor`] and [`T5TokenizerFast`]. See
    the docstring of [`~Pix2StructProcessor.__call__`] and [`~Pix2StructProcessor.decode`] for more information.

    Args:
        image_processor (`Pix2StructImageProcessor`):
            An instance of [`Pix2StructImageProcessor`]. The image processor is a required input.
        tokenizer (Union[`T5TokenizerFast`, `T5Tokenizer`]):
            An instance of ['T5TokenizerFast`] or ['T5Tokenizer`]. The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Pix2Struct never consumes token type ids; disable them on the tokenizer.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        This method uses [`Pix2StructImageProcessor.preprocess`] method to prepare image(s) for the model, and
        [`T5TokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # The tokenizer is invoked with the exact same arguments in both the text-only
        # and image+text paths; build the argument dict once instead of duplicating
        # the 17-argument call (this was previously copy-pasted in two branches).
        tokenizer_kwargs = dict(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_token_type_ids=return_token_type_ids,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(**tokenizer_kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox; in VQA mode the text is rendered into the
            # image header by the image processor instead of being tokenized.
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(**tokenizer_kwargs)

            # The tokenized text feeds the decoder, so rename the keys accordingly.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Pix2StructTokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
        Please refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Pix2StructTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, duplicates removed, order preserved."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__init__.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Declares the submodules/symbols exposed by this package; actual imports are
# deferred via `_LazyModule` below so importing `transformers` stays cheap.
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
# The image processor is only registered when vision deps (e.g. PIL) are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]


# Modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors `_import_structure`.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )


else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/configuration_vivit.cpython-310.pyc ADDED
Binary file (4.6 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/convert_vivit_flax_to_pytorch.cpython-310.pyc ADDED
Binary file (7.39 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/image_processing_vivit.cpython-310.pyc ADDED
Binary file (15.4 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/modeling_vivit.cpython-310.pyc ADDED
Binary file (24.5 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/configuration_vivit.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ViViT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "google/vivit-b-16x2-kinetics400": (
25
+ "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
26
+ ),
27
+ # See all Vivit models at https://huggingface.co/models?filter=vivit
28
+ }
29
+
30
+
31
+ class VivitConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`VivitModel`]. It is used to instantiate a ViViT
34
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
35
+ defaults will yield a similar configuration to that of the ViViT
36
+ [google/vivit-b-16x2-kinetics400](https://huggingface.co/google/vivit-b-16x2-kinetics400) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+ Args:
42
+ image_size (`int`, *optional*, defaults to 224):
43
+ The size (resolution) of each image.
44
+ num_frames (`int`, *optional*, defaults to 32):
45
+ The number of frames in each video.
46
+ tubelet_size (`List[int]`, *optional*, defaults to `[2, 16, 16]`):
47
+ The size (resolution) of each tubelet.
48
+ num_channels (`int`, *optional*, defaults to 3):
49
+ The number of input channels.
50
+ hidden_size (`int`, *optional*, defaults to 768):
51
+ Dimensionality of the encoder layers and the pooler layer.
52
+ num_hidden_layers (`int`, *optional*, defaults to 12):
53
+ Number of hidden layers in the Transformer encoder.
54
+ num_attention_heads (`int`, *optional*, defaults to 12):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ intermediate_size (`int`, *optional*, defaults to 3072):
57
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
58
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_fast"`):
59
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
60
+ `"relu"`, `"selu"`, `"gelu_fast"` and `"gelu_new"` are supported.
61
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
62
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
63
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio for the attention probabilities.
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
68
+ The epsilon used by the layer normalization layers.
69
+ qkv_bias (`bool`, *optional*, defaults to `True`):
70
+ Whether to add a bias to the queries, keys and values.
71
+
72
+ Example:
73
+
74
+ ```python
75
+ >>> from transformers import VivitConfig, VivitModel
76
+
77
+ >>> # Initializing a ViViT google/vivit-b-16x2-kinetics400 style configuration
78
+ >>> configuration = VivitConfig()
79
+
80
+ >>> # Initializing a model (with random weights) from the google/vivit-b-16x2-kinetics400 style configuration
81
+ >>> model = VivitModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "vivit"
88
+
89
+ def __init__(
90
+ self,
91
+ image_size=224,
92
+ num_frames=32,
93
+ tubelet_size=[2, 16, 16],
94
+ num_channels=3,
95
+ hidden_size=768,
96
+ num_hidden_layers=12,
97
+ num_attention_heads=12,
98
+ intermediate_size=3072,
99
+ hidden_act="gelu_fast",
100
+ hidden_dropout_prob=0.0,
101
+ attention_probs_dropout_prob=0.0,
102
+ initializer_range=0.02,
103
+ layer_norm_eps=1e-06,
104
+ qkv_bias=True,
105
+ **kwargs,
106
+ ):
107
+ self.hidden_size = hidden_size
108
+ self.num_hidden_layers = num_hidden_layers
109
+ self.num_attention_heads = num_attention_heads
110
+ self.intermediate_size = intermediate_size
111
+ self.hidden_act = hidden_act
112
+ self.hidden_dropout_prob = hidden_dropout_prob
113
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
114
+ self.initializer_range = initializer_range
115
+ self.layer_norm_eps = layer_norm_eps
116
+
117
+ self.image_size = image_size
118
+ self.num_frames = num_frames
119
+ self.tubelet_size = tubelet_size
120
+ self.num_channels = num_channels
121
+ self.qkv_bias = qkv_bias
122
+
123
+ super().__init__(**kwargs)
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/convert_vivit_flax_to_pytorch.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Flax ViViT checkpoints from the original repository to PyTorch. URL:
16
+ https://github.com/google-research/scenic/tree/main/scenic/projects/vivit
17
+ """
18
+ import argparse
19
+ import json
20
+ import os.path
21
+ from collections import OrderedDict
22
+
23
+ import numpy as np
24
+ import requests
25
+ import torch
26
+ from flax.training.checkpoints import restore_checkpoint
27
+ from huggingface_hub import hf_hub_download
28
+
29
+ from transformers import VivitConfig, VivitForVideoClassification, VivitImageProcessor
30
+ from transformers.image_utils import PILImageResampling
31
+
32
+
33
+ def download_checkpoint(path):
34
+ url = "https://storage.googleapis.com/scenic-bucket/vivit/kinetics_400/vivit_base_16x2_unfactorized/checkpoint"
35
+
36
+ with open(path, "wb") as f:
37
+ with requests.get(url, stream=True) as req:
38
+ for chunk in req.iter_content(chunk_size=2048):
39
+ f.write(chunk)
40
+
41
+
42
+ def get_vivit_config() -> VivitConfig:
43
+ config = VivitConfig()
44
+
45
+ config.num_labels = 400
46
+ repo_id = "huggingface/label-files"
47
+ filename = "kinetics400-id2label.json"
48
+
49
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
50
+ id2label = {int(k): v for k, v in id2label.items()}
51
+ config.id2label = id2label
52
+ config.label2id = {v: k for k, v in id2label.items()}
53
+ return config
54
+
55
+
56
+ # We will verify our results on a video of eating spaghetti
57
+ # Frame indices used: [ 47, 51, 55, 59, 63, 67, 71, 75, 80, 84, 88, 92, 96, 100, 104, 108, 113, 117,
58
+ # 121, 125, 129, 133, 137, 141, 146, 150, 154, 158, 162, 166, 170, 174]
59
+ def prepare_video():
60
+ file = hf_hub_download(
61
+ repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset"
62
+ )
63
+ video = np.load(file)
64
+ return list(video)
65
+
66
+
67
+ def transform_attention(current: np.ndarray):
68
+ if np.ndim(current) == 2:
69
+ return transform_attention_bias(current)
70
+
71
+ elif np.ndim(current) == 3:
72
+ return transform_attention_kernel(current)
73
+
74
+ else:
75
+ raise Exception(f"Invalid number of dimesions: {np.ndim(current)}")
76
+
77
+
78
+ def transform_attention_bias(current: np.ndarray):
79
+ return current.flatten()
80
+
81
+
82
+ def transform_attention_kernel(current: np.ndarray):
83
+ return np.reshape(current, (current.shape[0], current.shape[1] * current.shape[2])).T
84
+
85
+
86
+ def transform_attention_output_weight(current: np.ndarray):
87
+ return np.reshape(current, (current.shape[0] * current.shape[1], current.shape[2])).T
88
+
89
+
90
+ def transform_state_encoder_block(state_dict, i):
91
+ state = state_dict["optimizer"]["target"]["Transformer"][f"encoderblock_{i}"]
92
+
93
+ prefix = f"encoder.layer.{i}."
94
+ new_state = {
95
+ prefix + "intermediate.dense.bias": state["MlpBlock_0"]["Dense_0"]["bias"],
96
+ prefix + "intermediate.dense.weight": np.transpose(state["MlpBlock_0"]["Dense_0"]["kernel"]),
97
+ prefix + "output.dense.bias": state["MlpBlock_0"]["Dense_1"]["bias"],
98
+ prefix + "output.dense.weight": np.transpose(state["MlpBlock_0"]["Dense_1"]["kernel"]),
99
+ prefix + "layernorm_before.bias": state["LayerNorm_0"]["bias"],
100
+ prefix + "layernorm_before.weight": state["LayerNorm_0"]["scale"],
101
+ prefix + "layernorm_after.bias": state["LayerNorm_1"]["bias"],
102
+ prefix + "layernorm_after.weight": state["LayerNorm_1"]["scale"],
103
+ prefix + "attention.attention.query.bias": transform_attention(
104
+ state["MultiHeadDotProductAttention_0"]["query"]["bias"]
105
+ ),
106
+ prefix + "attention.attention.query.weight": transform_attention(
107
+ state["MultiHeadDotProductAttention_0"]["query"]["kernel"]
108
+ ),
109
+ prefix + "attention.attention.key.bias": transform_attention(
110
+ state["MultiHeadDotProductAttention_0"]["key"]["bias"]
111
+ ),
112
+ prefix + "attention.attention.key.weight": transform_attention(
113
+ state["MultiHeadDotProductAttention_0"]["key"]["kernel"]
114
+ ),
115
+ prefix + "attention.attention.value.bias": transform_attention(
116
+ state["MultiHeadDotProductAttention_0"]["value"]["bias"]
117
+ ),
118
+ prefix + "attention.attention.value.weight": transform_attention(
119
+ state["MultiHeadDotProductAttention_0"]["value"]["kernel"]
120
+ ),
121
+ prefix + "attention.output.dense.bias": state["MultiHeadDotProductAttention_0"]["out"]["bias"],
122
+ prefix + "attention.output.dense.weight": transform_attention_output_weight(
123
+ state["MultiHeadDotProductAttention_0"]["out"]["kernel"]
124
+ ),
125
+ }
126
+
127
+ return new_state
128
+
129
+
130
+ def get_n_layers(state_dict):
131
+ return sum([1 if "encoderblock_" in k else 0 for k in state_dict["optimizer"]["target"]["Transformer"].keys()])
132
+
133
+
134
+ def transform_state(state_dict, classification_head=False):
135
+ transformer_layers = get_n_layers(state_dict)
136
+
137
+ new_state = OrderedDict()
138
+
139
+ new_state["layernorm.bias"] = state_dict["optimizer"]["target"]["Transformer"]["encoder_norm"]["bias"]
140
+ new_state["layernorm.weight"] = state_dict["optimizer"]["target"]["Transformer"]["encoder_norm"]["scale"]
141
+
142
+ new_state["embeddings.patch_embeddings.projection.weight"] = np.transpose(
143
+ state_dict["optimizer"]["target"]["embedding"]["kernel"], (4, 3, 0, 1, 2)
144
+ )
145
+ new_state["embeddings.patch_embeddings.projection.bias"] = state_dict["optimizer"]["target"]["embedding"]["bias"]
146
+
147
+ new_state["embeddings.cls_token"] = state_dict["optimizer"]["target"]["cls"]
148
+ new_state["embeddings.position_embeddings"] = state_dict["optimizer"]["target"]["Transformer"]["posembed_input"][
149
+ "pos_embedding"
150
+ ]
151
+
152
+ for i in range(transformer_layers):
153
+ new_state.update(transform_state_encoder_block(state_dict, i))
154
+
155
+ if classification_head:
156
+ new_state = {"vivit." + k: v for k, v in new_state.items()}
157
+ new_state["classifier.weight"] = np.transpose(state_dict["optimizer"]["target"]["output_projection"]["kernel"])
158
+ new_state["classifier.bias"] = np.transpose(state_dict["optimizer"]["target"]["output_projection"]["bias"])
159
+
160
+ return {k: torch.tensor(v) for k, v in new_state.items()}
161
+
162
+
163
+ # checks that image processor settings are the same as in the original implementation
164
+ # original: https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/data/video_tfrecord_dataset.py
165
+ # dataset specific config:
166
+ # https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/configs/kinetics400/vivit_base_k400.py
167
+ def get_processor() -> VivitImageProcessor:
168
+ extractor = VivitImageProcessor()
169
+
170
+ assert extractor.do_resize is True
171
+ assert extractor.size == {"shortest_edge": 256}
172
+ assert extractor.do_center_crop is True
173
+ assert extractor.crop_size == {"width": 224, "height": 224}
174
+ assert extractor.resample == PILImageResampling.BILINEAR
175
+
176
+ # here: https://github.com/deepmind/dmvr/blob/master/dmvr/modalities.py
177
+ # one can seen that add_image has default values for normalization_mean and normalization_std set to 0 and 1
178
+ # which effectively means no normalization (and ViViT does not overwrite those when calling this func)
179
+ assert extractor.do_normalize is False
180
+ assert extractor.do_rescale is True
181
+ assert extractor.rescale_factor == 1 / 255
182
+
183
+ # zero-centering = True in original implementation
184
+ assert extractor.do_zero_centering is True
185
+
186
+ return extractor
187
+
188
+
189
+ def convert(output_path: str):
190
+ flax_model_path = "checkpoint"
191
+
192
+ if not os.path.exists(flax_model_path):
193
+ download_checkpoint(flax_model_path)
194
+
195
+ state_dict = restore_checkpoint(flax_model_path, None)
196
+ new_state = transform_state(state_dict, classification_head=True)
197
+
198
+ config = get_vivit_config()
199
+
200
+ assert config.image_size == 224
201
+ assert config.num_frames == 32
202
+
203
+ model = VivitForVideoClassification(config)
204
+ model.load_state_dict(new_state)
205
+ model.eval()
206
+
207
+ extractor = get_processor()
208
+
209
+ video = prepare_video()
210
+ inputs = extractor(video, return_tensors="pt")
211
+
212
+ outputs = model(**inputs)
213
+
214
+ expected_shape = torch.Size([1, 400])
215
+ expected_slice = torch.tensor([-1.0543, 2.0764, -0.2104, 0.4439, -0.9658])
216
+
217
+ assert outputs.logits.shape == expected_shape
218
+ assert torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4), outputs.logits[0, :5]
219
+
220
+ model.save_pretrained(output_path)
221
+ extractor.save_pretrained(output_path)
222
+
223
+
224
+ if __name__ == "__main__":
225
+ parser = argparse.ArgumentParser()
226
+
227
+ parser.add_argument("--output_model_name", "-o", type=str, help="Output path for the converted HuggingFace model")
228
+
229
+ args = parser.parse_args()
230
+ convert(args.output_model_name)
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/image_processing_vivit.py ADDED
@@ -0,0 +1,400 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Vivit."""
16
+ from typing import Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+
20
+ from transformers.utils import is_vision_available
21
+ from transformers.utils.generic import TensorType
22
+
23
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
24
+ from ...image_transforms import (
25
+ get_resize_output_image_size,
26
+ rescale,
27
+ resize,
28
+ to_channel_dimension_format,
29
+ )
30
+ from ...image_utils import (
31
+ IMAGENET_STANDARD_MEAN,
32
+ IMAGENET_STANDARD_STD,
33
+ ChannelDimension,
34
+ ImageInput,
35
+ PILImageResampling,
36
+ infer_channel_dimension_format,
37
+ is_scaled_image,
38
+ is_valid_image,
39
+ to_numpy_array,
40
+ valid_images,
41
+ )
42
+ from ...utils import logging
43
+
44
+
45
+ if is_vision_available():
46
+ import PIL
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ def make_batched(videos) -> List[List[ImageInput]]:
52
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
53
+ return videos
54
+
55
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
56
+ return [videos]
57
+
58
+ elif is_valid_image(videos):
59
+ return [[videos]]
60
+
61
+ raise ValueError(f"Could not make batched video from {videos}")
62
+
63
+
64
+ class VivitImageProcessor(BaseImageProcessor):
65
+ r"""
66
+ Constructs a Vivit image processor.
67
+
68
+ Args:
69
+ do_resize (`bool`, *optional*, defaults to `True`):
70
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
71
+ `do_resize` parameter in the `preprocess` method.
72
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
73
+ Size of the output image after resizing. The shortest edge of the image will be resized to
74
+ `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overriden by
75
+ `size` in the `preprocess` method.
76
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
77
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
78
+ `preprocess` method.
79
+ do_center_crop (`bool`, *optional*, defaults to `True`):
80
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
81
+ parameter in the `preprocess` method.
82
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
83
+ Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
84
+ `preprocess` method.
85
+ do_rescale (`bool`, *optional*, defaults to `True`):
86
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
87
+ parameter in the `preprocess` method.
88
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/127.5`):
89
+ Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
90
+ in the `preprocess` method.
91
+ offset (`bool`, *optional*, defaults to `True`):
92
+ Whether to scale the image in both negative and positive directions. Can be overriden by the `offset` in
93
+ the `preprocess` method.
94
+ do_normalize (`bool`, *optional*, defaults to `True`):
95
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
96
+ method.
97
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
98
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
99
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
100
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
101
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
102
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
103
+ """
104
+
105
+ model_input_names = ["pixel_values"]
106
+
107
+ def __init__(
108
+ self,
109
+ do_resize: bool = True,
110
+ size: Dict[str, int] = None,
111
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
112
+ do_center_crop: bool = True,
113
+ crop_size: Dict[str, int] = None,
114
+ do_rescale: bool = True,
115
+ rescale_factor: Union[int, float] = 1 / 127.5,
116
+ offset: bool = True,
117
+ do_normalize: bool = True,
118
+ image_mean: Optional[Union[float, List[float]]] = None,
119
+ image_std: Optional[Union[float, List[float]]] = None,
120
+ **kwargs,
121
+ ) -> None:
122
+ super().__init__(**kwargs)
123
+ size = size if size is not None else {"shortest_edge": 256}
124
+ size = get_size_dict(size, default_to_square=False)
125
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
126
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
127
+
128
+ self.do_resize = do_resize
129
+ self.size = size
130
+ self.do_center_crop = do_center_crop
131
+ self.crop_size = crop_size
132
+ self.resample = resample
133
+ self.do_rescale = do_rescale
134
+ self.rescale_factor = rescale_factor
135
+ self.offset = offset
136
+ self.do_normalize = do_normalize
137
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
138
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
139
+
140
+ def resize(
141
+ self,
142
+ image: np.ndarray,
143
+ size: Dict[str, int],
144
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
145
+ data_format: Optional[Union[str, ChannelDimension]] = None,
146
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
147
+ **kwargs,
148
+ ) -> np.ndarray:
149
+ """
150
+ Resize an image.
151
+
152
+ Args:
153
+ image (`np.ndarray`):
154
+ Image to resize.
155
+ size (`Dict[str, int]`):
156
+ Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
157
+ have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its
158
+ shortest edge of length `s` while keeping the aspect ratio of the original image.
159
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
160
+ Resampling filter to use when resiizing the image.
161
+ data_format (`str` or `ChannelDimension`, *optional*):
162
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
163
+ input_data_format (`str` or `ChannelDimension`, *optional*):
164
+ The channel dimension format of the input image. If not provided, it will be inferred.
165
+ """
166
+ size = get_size_dict(size, default_to_square=False)
167
+ if "shortest_edge" in size:
168
+ output_size = get_resize_output_image_size(
169
+ image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
170
+ )
171
+ elif "height" in size and "width" in size:
172
+ output_size = (size["height"], size["width"])
173
+ else:
174
+ raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
175
+ return resize(
176
+ image,
177
+ size=output_size,
178
+ resample=resample,
179
+ data_format=data_format,
180
+ input_data_format=input_data_format,
181
+ **kwargs,
182
+ )
183
+
184
+ # Copied from transformers.models.efficientnet.image_processing_efficientnet.EfficientNetImageProcessor.rescale
185
+ def rescale(
186
+ self,
187
+ image: np.ndarray,
188
+ scale: Union[int, float],
189
+ offset: bool = True,
190
+ data_format: Optional[Union[str, ChannelDimension]] = None,
191
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
192
+ **kwargs,
193
+ ):
194
+ """
195
+ Rescale an image by a scale factor.
196
+
197
+ If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is
198
+ 1/127.5, the image is rescaled between [-1, 1].
199
+ image = image * scale - 1
200
+
201
+ If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].
202
+ image = image * scale
203
+
204
+ Args:
205
+ image (`np.ndarray`):
206
+ Image to rescale.
207
+ scale (`int` or `float`):
208
+ Scale to apply to the image.
209
+ offset (`bool`, *optional*):
210
+ Whether to scale the image in both negative and positive directions.
211
+ data_format (`str` or `ChannelDimension`, *optional*):
212
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
213
+ input_data_format (`ChannelDimension` or `str`, *optional*):
214
+ The channel dimension format of the input image. If not provided, it will be inferred.
215
+ """
216
+ rescaled_image = rescale(
217
+ image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs
218
+ )
219
+
220
+ if offset:
221
+ rescaled_image = rescaled_image - 1
222
+
223
+ return rescaled_image
224
+
225
+ def _preprocess_image(
226
+ self,
227
+ image: ImageInput,
228
+ do_resize: bool = None,
229
+ size: Dict[str, int] = None,
230
+ resample: PILImageResampling = None,
231
+ do_center_crop: bool = None,
232
+ crop_size: Dict[str, int] = None,
233
+ do_rescale: bool = None,
234
+ rescale_factor: float = None,
235
+ offset: bool = None,
236
+ do_normalize: bool = None,
237
+ image_mean: Optional[Union[float, List[float]]] = None,
238
+ image_std: Optional[Union[float, List[float]]] = None,
239
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
240
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
241
+ ) -> np.ndarray:
242
+ """Preprocesses a single image."""
243
+ if do_resize and size is None or resample is None:
244
+ raise ValueError("Size and resample must be specified if do_resize is True.")
245
+
246
+ if do_center_crop and crop_size is None:
247
+ raise ValueError("Crop size must be specified if do_center_crop is True.")
248
+
249
+ if do_rescale and rescale_factor is None:
250
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
251
+
252
+ if do_normalize and (image_mean is None or image_std is None):
253
+ raise ValueError("Image mean and std must be specified if do_normalize is True.")
254
+
255
+ if offset and not do_rescale:
256
+ raise ValueError("For offset, do_rescale must also be set to True.")
257
+
258
+ # All transformations expect numpy arrays.
259
+ image = to_numpy_array(image)
260
+
261
+ if is_scaled_image(image) and do_rescale:
262
+ logger.warning_once(
263
+ "It looks like you are trying to rescale already rescaled images. If the input"
264
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
265
+ )
266
+
267
+ if input_data_format is None:
268
+ input_data_format = infer_channel_dimension_format(image)
269
+
270
+ if do_resize:
271
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
272
+
273
+ if do_center_crop:
274
+ image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
275
+
276
+ if do_rescale:
277
+ image = self.rescale(image=image, scale=rescale_factor, offset=offset, input_data_format=input_data_format)
278
+
279
+ if do_normalize:
280
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
281
+
282
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
283
+ return image
284
+
285
+ def preprocess(
286
+ self,
287
+ videos: ImageInput,
288
+ do_resize: bool = None,
289
+ size: Dict[str, int] = None,
290
+ resample: PILImageResampling = None,
291
+ do_center_crop: bool = None,
292
+ crop_size: Dict[str, int] = None,
293
+ do_rescale: bool = None,
294
+ rescale_factor: float = None,
295
+ offset: bool = None,
296
+ do_normalize: bool = None,
297
+ image_mean: Optional[Union[float, List[float]]] = None,
298
+ image_std: Optional[Union[float, List[float]]] = None,
299
+ return_tensors: Optional[Union[str, TensorType]] = None,
300
+ data_format: ChannelDimension = ChannelDimension.FIRST,
301
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
302
+ **kwargs,
303
+ ) -> PIL.Image.Image:
304
+ """
305
+ Preprocess an image or batch of images.
306
+
307
+ Args:
308
+ videos (`ImageInput`):
309
+ Video frames to preprocess. Expects a single or batch of video frames with pixel values ranging from 0
310
+ to 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`.
311
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
312
+ Whether to resize the image.
313
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
314
+ Size of the image after applying resize.
315
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
316
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
317
+ has an effect if `do_resize` is set to `True`.
318
+ do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`):
319
+ Whether to centre crop the image.
320
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
321
+ Size of the image after applying the centre crop.
322
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
323
+ Whether to rescale the image values between `[-1 - 1]` if `offset` is `True`, `[0, 1]` otherwise.
324
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
325
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
326
+ offset (`bool`, *optional*, defaults to `self.offset`):
327
+ Whether to scale the image in both negative and positive directions.
328
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
329
+ Whether to normalize the image.
330
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
331
+ Image mean.
332
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
333
+ Image standard deviation.
334
+ return_tensors (`str` or `TensorType`, *optional*):
335
+ The type of tensors to return. Can be one of:
336
+ - Unset: Return a list of `np.ndarray`.
337
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
338
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
339
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
340
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
341
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
342
+ The channel dimension format for the output image. Can be one of:
343
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
344
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
345
+ - Unset: Use the inferred channel dimension format of the input image.
346
+ input_data_format (`ChannelDimension` or `str`, *optional*):
347
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
348
+ from the input image. Can be one of:
349
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
350
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
351
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
352
+ """
353
+ do_resize = do_resize if do_resize is not None else self.do_resize
354
+ resample = resample if resample is not None else self.resample
355
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
356
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
357
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
358
+ offset = offset if offset is not None else self.offset
359
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
360
+ image_mean = image_mean if image_mean is not None else self.image_mean
361
+ image_std = image_std if image_std is not None else self.image_std
362
+
363
+ size = size if size is not None else self.size
364
+ size = get_size_dict(size, default_to_square=False)
365
+ crop_size = crop_size if crop_size is not None else self.crop_size
366
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
367
+
368
+ if not valid_images(videos):
369
+ raise ValueError(
370
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
371
+ "torch.Tensor, tf.Tensor or jax.ndarray."
372
+ )
373
+
374
+ videos = make_batched(videos)
375
+
376
+ videos = [
377
+ [
378
+ self._preprocess_image(
379
+ image=img,
380
+ do_resize=do_resize,
381
+ size=size,
382
+ resample=resample,
383
+ do_center_crop=do_center_crop,
384
+ crop_size=crop_size,
385
+ do_rescale=do_rescale,
386
+ rescale_factor=rescale_factor,
387
+ offset=offset,
388
+ do_normalize=do_normalize,
389
+ image_mean=image_mean,
390
+ image_std=image_std,
391
+ data_format=data_format,
392
+ input_data_format=input_data_format,
393
+ )
394
+ for img in video
395
+ ]
396
+ for video in videos
397
+ ]
398
+
399
+ data = {"pixel_values": videos}
400
+ return BatchFeature(data=data, tensor_type=return_tensors)
evalkit_internvl/lib/python3.10/site-packages/transformers/models/vivit/modeling_vivit.py ADDED
@@ -0,0 +1,745 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ViViT model."""
16
+
17
+
18
+ import math
19
+ from typing import Optional, Set, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
28
+ from ...modeling_utils import PreTrainedModel
29
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
30
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
31
+ from .configuration_vivit import VivitConfig
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ _CHECKPOINT_FOR_DOC = "google/vivit-b-16x2-kinetics400"
37
+ _CONFIG_FOR_DOC = "VivitConfig"
38
+
39
+ VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
40
+ "google/vivit-b-16x2-kinetics400",
41
+ # See all Vivit models at https://huggingface.co/models?filter=vivit
42
+ ]
43
+
44
+
45
class VivitTubeletEmbeddings(nn.Module):
    """
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
    """

    def __init__(self, config):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        # Number of tubelets extracted from a (num_frames, image_size, image_size) video.
        self.num_patches = (
            (self.image_size // self.patch_size[2])
            * (self.image_size // self.patch_size[1])
            * (self.num_frames // self.patch_size[0])
        )
        self.embed_dim = config.hidden_size

        # Non-overlapping 3D convolution: kernel size == stride == tubelet size, so each
        # output position corresponds to exactly one tubelet.
        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values):
        """
        Args:
            pixel_values: tensor of shape (batch_size, num_frames, num_channels, height, width).

        Returns:
            Tensor of shape (batch_size, num_patches, hidden_size).

        Raises:
            ValueError: if the spatial size of the input does not match `config.image_size`.
        """
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if height != self.image_size or width != self.image_size:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width) as expected by Conv3d
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        # Fix: run the (expensive) Conv3d projection only once. The original code called
        # `self.projection(pixel_values)` twice and discarded the first result, doubling the
        # cost of the embedding step. flatten(2) merges the (frames, h, w) output grid into a
        # single sequence axis; transpose yields (batch_size, seq_len, hidden_size).
        x = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return x
86
+
87
+
88
class VivitEmbeddings(nn.Module):
    """
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
    """

    def __init__(self, config):
        super().__init__()

        # Learnable classification token prepended to every patch sequence.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = VivitTubeletEmbeddings(config)

        # One learnable position vector per patch, plus one for the CLS token.
        self.position_embeddings = nn.Parameter(
            torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size)
        )
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, pixel_values):
        """Embed a batch of videos into a (batch, num_patches + 1, hidden_size) token sequence."""
        num_videos = pixel_values.shape[0]
        patch_tokens = self.patch_embeddings(pixel_values)

        # Replicate the single CLS token once per video and prepend it to the patch tokens.
        cls_tokens = self.cls_token.tile([num_videos, 1, 1])
        tokens = torch.cat((cls_tokens, patch_tokens), dim=1)

        # Add the learned positional encoding to every token, then regularize.
        return self.dropout(tokens + self.position_embeddings)
121
+
122
+
123
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Vivit
124
class VivitSelfAttention(nn.Module):
    """Standard multi-head self-attention over the tubelet token sequence (ViT-style)."""

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            # Fix: the original f-string used `{config.hidden_size,}` (trailing comma), which
            # formats a one-element tuple like "(768,)" in the error message.
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        """Reshape (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        """
        Args:
            hidden_states: (batch, seq, hidden_size) input tokens.
            head_mask: optional per-head multiplicative mask applied to attention probabilities.
            output_attentions: when True, also return the attention probabilities.

        Returns:
            A tuple `(context,)` or `(context, attention_probs)`.
        """
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        # Scale by sqrt(d_k) to keep the softmax in a well-conditioned range.
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # Merge the head dimension back: (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
182
+
183
+
184
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Vivit
185
class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is intentionally unused here: the residual add lives in VivitLayer.
        projected = self.dense(hidden_states)
        return self.dropout(projected)
201
+
202
+
203
+ # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Vivit
204
class VivitAttention(nn.Module):
    """Self-attention plus its output projection, with support for pruning attention heads."""

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.attention = VivitSelfAttention(config)
        self.output = VivitSelfOutput(config)
        # Heads pruned so far; passed to find_pruneable_heads_and_indices so repeated
        # prune calls compute indices relative to the already-shrunk layers.
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        """Remove the given attention heads by slicing the q/k/v projections (rows) and the output projection (columns)."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # Run self-attention, then project its context output back to hidden_size.
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
241
+
242
+
243
class VivitIntermediate(nn.Module):
    """First half of the MLP block: expand hidden_size -> intermediate_size, activate, dropout."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # `hidden_act` may be a key into the ACT2FN registry or an activation callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        expanded = self.dense(hidden_states)
        activated = self.intermediate_act_fn(expanded)
        return self.dropout(activated)
259
+
260
+
261
class VivitOutput(nn.Module):
    """Second half of the MLP block: project intermediate_size -> hidden_size, dropout, residual add."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # Project back to the model width and regularize, then add the residual branch.
        projected = self.dropout(self.dense(hidden_states))
        return projected + input_tensor
275
+
276
+
277
class VivitLayer(nn.Module):
    """This corresponds to the EncoderBlock class in the scenic/vivit implementation."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VivitAttention(config)
        self.intermediate = VivitIntermediate(config)
        self.output = VivitOutput(config)
        # Pre-norm block: one LayerNorm before attention, one before the MLP.
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, head_mask=None, output_attentions=False):
        """Run one pre-norm transformer block; returns (layer_output, [attention_probs])."""
        self_attention_outputs = self.attention(
            # in Vivit, layernorm is applied before self-attention
            self.layernorm_before(hidden_states),
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        # add self attentions if we output attention weights
        outputs = self_attention_outputs[1:]

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in Vivit, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs
314
+
315
+
316
class VivitEncoder(nn.Module):
    """Stack of `config.num_hidden_layers` VivitLayer blocks, with optional gradient checkpointing."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (presumably via PreTrainedModel.gradient_checkpointing_enable) — confirm.
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run all layers; optionally collect per-layer hidden states and attention maps."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the *input* of each layer (so the first entry is the embedding output).
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                # Trade compute for memory during training: activations are recomputed in backward.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            # Append the final hidden state after the last layer.
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
365
+
366
+
367
class VivitPooler(nn.Module):
    """Pools the encoder output by passing the [CLS] token through a dense layer and a tanh."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # The token at position 0 is the [CLS] token and summarizes the whole sequence.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
380
+
381
+
382
class VivitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = VivitConfig
    base_model_prefix = "vivit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Parameter):
            # NOTE(review): nn.Parameter is not an nn.Module, so if this hook is only invoked via
            # Module.apply this branch looks unreachable — confirm how _init_weights is dispatched.
            module.data.normal_(mean=0.0, std=self.config.initializer_range)
410
+
411
+
412
+ VIVIT_START_DOCSTRING = r"""
413
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
414
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
415
+ behavior.
416
+
417
+ Parameters:
418
+ config ([`VivitConfig`]): Model configuration class with all the parameters of the model.
419
+ Initializing with a config file does not load the weights associated with the model, only the
420
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
421
+ """
422
+
423
+ VIVIT_INPUTS_DOCSTRING = r"""
424
+ Args:
425
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
426
+ Pixel values. Pixel values can be obtained using [`VivitImageProcessor`]. See
427
+ [`VivitImageProcessor.preprocess`] for details.
428
+
429
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
430
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
431
+
432
+ - 1 indicates the head is **not masked**,
433
+ - 0 indicates the head is **masked**.
434
+
435
+ output_attentions (`bool`, *optional*):
436
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
437
+ tensors for more detail.
438
+ output_hidden_states (`bool`, *optional*):
439
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
440
+ more detail.
441
+ return_dict (`bool`, *optional*):
442
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
443
+ """
444
+
445
+
446
@add_start_docstrings(
    "The bare ViViT Transformer model outputting raw hidden-states without any specific head on top.",
    VIVIT_START_DOCSTRING,
)
class VivitModel(VivitPreTrainedModel):
    # Embeddings -> encoder -> final LayerNorm, with an optional tanh pooler on the [CLS] token.
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = VivitEmbeddings(config)
        self.encoder = VivitEncoder(config)

        # Final LayerNorm applied to the encoder output (post-norm on the whole stack).
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = VivitPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # The tubelet embedding module plays the role of the input embedding table.
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        Args:
            heads_to_prune:
                dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Expand a (num_heads,) or (num_layers, num_heads) mask to one entry per layer.
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        # pooled_output is None when the model was built with add_pooling_layer=False.
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
597
+
598
+
599
+ @add_start_docstrings(
600
+ """ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
601
+ [CLS] token) e.g. for Kinetics-400.""",
602
+ VIVIT_START_DOCSTRING,
603
+ )
604
+ class VivitForVideoClassification(VivitPreTrainedModel):
605
+ def __init__(self, config):
606
+ super().__init__(config)
607
+
608
+ self.num_labels = config.num_labels
609
+ self.vivit = VivitModel(config, add_pooling_layer=False)
610
+
611
+ # Classifier head
612
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
613
+
614
+ # Initialize weights and apply final processing
615
+ self.post_init()
616
+
617
+ @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
618
+ @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
619
+ def forward(
620
+ self,
621
+ pixel_values: Optional[torch.FloatTensor] = None,
622
+ head_mask: Optional[torch.FloatTensor] = None,
623
+ labels: Optional[torch.LongTensor] = None,
624
+ output_attentions: Optional[bool] = None,
625
+ output_hidden_states: Optional[bool] = None,
626
+ return_dict: Optional[bool] = None,
627
+ ) -> Union[Tuple[torch.FloatTensor], ImageClassifierOutput]:
628
+ r"""
629
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
630
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
631
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
632
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
633
+
634
+ Returns:
635
+
636
+ Examples:
637
+
638
+ ```python
639
+ >>> import av
640
+ >>> import numpy as np
641
+ >>> import torch
642
+
643
+ >>> from transformers import VivitImageProcessor, VivitForVideoClassification
644
+ >>> from huggingface_hub import hf_hub_download
645
+
646
+ >>> np.random.seed(0)
647
+
648
+
649
+ >>> def read_video_pyav(container, indices):
650
+ ... '''
651
+ ... Decode the video with PyAV decoder.
652
+ ... Args:
653
+ ... container (`av.container.input.InputContainer`): PyAV container.
654
+ ... indices (`List[int]`): List of frame indices to decode.
655
+ ... Returns:
656
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
657
+ ... '''
658
+ ... frames = []
659
+ ... container.seek(0)
660
+ ... start_index = indices[0]
661
+ ... end_index = indices[-1]
662
+ ... for i, frame in enumerate(container.decode(video=0)):
663
+ ... if i > end_index:
664
+ ... break
665
+ ... if i >= start_index and i in indices:
666
+ ... frames.append(frame)
667
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
668
+
669
+
670
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
671
+ ... '''
672
+ ... Sample a given number of frame indices from the video.
673
+ ... Args:
674
+ ... clip_len (`int`): Total number of frames to sample.
675
+ ... frame_sample_rate (`int`): Sample every n-th frame.
676
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
677
+ ... Returns:
678
+ ... indices (`List[int]`): List of sampled frame indices
679
+ ... '''
680
+ ... converted_len = int(clip_len * frame_sample_rate)
681
+ ... end_idx = np.random.randint(converted_len, seg_len)
682
+ ... start_idx = end_idx - converted_len
683
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
684
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
685
+ ... return indices
686
+
687
+
688
+ >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
689
+ >>> file_path = hf_hub_download(
690
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
691
+ ... )
692
+ >>> container = av.open(file_path)
693
+
694
+ >>> # sample 32 frames
695
+ >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
696
+ >>> video = read_video_pyav(container=container, indices=indices)
697
+
698
+ >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
699
+ >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")
700
+
701
+ >>> inputs = image_processor(list(video), return_tensors="pt")
702
+
703
+ >>> with torch.no_grad():
704
+ ... outputs = model(**inputs)
705
+ ... logits = outputs.logits
706
+
707
+ >>> # model predicts one of the 400 Kinetics-400 classes
708
+ >>> predicted_label = logits.argmax(-1).item()
709
+ >>> print(model.config.id2label[predicted_label])
710
+ LABEL_116
711
+ ```"""
712
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
713
+
714
+ outputs = self.vivit(
715
+ pixel_values,
716
+ head_mask=head_mask,
717
+ output_attentions=output_attentions,
718
+ output_hidden_states=output_hidden_states,
719
+ return_dict=return_dict,
720
+ )
721
+
722
+ sequence_output = outputs[0]
723
+
724
+ logits = self.classifier(sequence_output[:, 0, :])
725
+
726
+ loss = None
727
+ if labels is not None:
728
+ if self.num_labels == 1:
729
+ # We are doing regression
730
+ loss_fct = MSELoss()
731
+ loss = loss_fct(logits.view(-1), labels.view(-1))
732
+ else:
733
+ loss_fct = CrossEntropyLoss()
734
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
735
+
736
+ if not return_dict:
737
+ output = (logits,) + outputs[2:]
738
+ return ((loss,) + output) if loss is not None else output
739
+
740
+ return ImageClassifierOutput(
741
+ loss=loss,
742
+ logits=logits,
743
+ hidden_states=outputs.hidden_states,
744
+ attentions=outputs.attentions,
745
+ )
evalkit_tf437/lib/python3.10/site-packages/filelock/__init__.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A platform independent file lock that supports the with-statement.
3
+
4
+ .. autodata:: filelock.__version__
5
+ :no-value:
6
+
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import sys
12
+ import warnings
13
+ from typing import TYPE_CHECKING
14
+
15
+ from ._api import AcquireReturnProxy, BaseFileLock
16
+ from ._error import Timeout
17
+ from ._soft import SoftFileLock
18
+ from ._unix import UnixFileLock, has_fcntl
19
+ from ._windows import WindowsFileLock
20
+ from .asyncio import (
21
+ AsyncAcquireReturnProxy,
22
+ AsyncSoftFileLock,
23
+ AsyncUnixFileLock,
24
+ AsyncWindowsFileLock,
25
+ BaseAsyncFileLock,
26
+ )
27
+ from .version import version
28
+
29
#: version of the project as a string
__version__: str = version


# Select the concrete lock implementations for the current platform:
# Windows uses msvcrt-based locking; POSIX uses fcntl when available; otherwise
# fall back to a cooperative soft (marker-file) lock.
if sys.platform == "win32":  # pragma: win32 cover
    _FileLock: type[BaseFileLock] = WindowsFileLock
    _AsyncFileLock: type[BaseAsyncFileLock] = AsyncWindowsFileLock
else:  # pragma: win32 no cover # noqa: PLR5501
    if has_fcntl:
        _FileLock: type[BaseFileLock] = UnixFileLock
        _AsyncFileLock: type[BaseAsyncFileLock] = AsyncUnixFileLock
    else:
        _FileLock = SoftFileLock
        _AsyncFileLock = AsyncSoftFileLock
        # NOTE(review): `warnings` is an imported module and normally never None; presumably this
        # guards against module teardown during interpreter shutdown — confirm.
        if warnings is not None:
            warnings.warn("only soft file lock is available", stacklevel=2)

# Static type checkers see a single concrete class; at runtime the alias points at the
# platform-specific implementation chosen above.
if TYPE_CHECKING:
    FileLock = SoftFileLock
    AsyncFileLock = AsyncSoftFileLock
else:
    #: Alias for the lock, which should be used for the current platform.
    FileLock = _FileLock
    AsyncFileLock = _AsyncFileLock


__all__ = [
    "AcquireReturnProxy",
    "AsyncAcquireReturnProxy",
    "AsyncFileLock",
    "AsyncSoftFileLock",
    "AsyncUnixFileLock",
    "AsyncWindowsFileLock",
    "BaseAsyncFileLock",
    "BaseFileLock",
    "FileLock",
    "SoftFileLock",
    "Timeout",
    "UnixFileLock",
    "WindowsFileLock",
    "__version__",
]
evalkit_tf437/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc ADDED
Binary file (2.12 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc ADDED
Binary file (1.5 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc ADDED
Binary file (490 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/filelock/_api.py ADDED
@@ -0,0 +1,403 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ import inspect
5
+ import logging
6
+ import os
7
+ import time
8
+ import warnings
9
+ from abc import ABCMeta, abstractmethod
10
+ from dataclasses import dataclass
11
+ from threading import local
12
+ from typing import TYPE_CHECKING, Any, cast
13
+ from weakref import WeakValueDictionary
14
+
15
+ from ._error import Timeout
16
+
17
+ if TYPE_CHECKING:
18
+ import sys
19
+ from types import TracebackType
20
+
21
+ if sys.version_info >= (3, 11): # pragma: no cover (py311+)
22
+ from typing import Self
23
+ else: # pragma: no cover (<py311)
24
+ from typing_extensions import Self
25
+
26
+
27
+ _LOGGER = logging.getLogger("filelock")
28
+
29
+
30
+ # This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__
31
+ # is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired
32
+ # again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak)
33
# Helper returned by :meth:`BaseFileLock.acquire`; it wraps the lock so that the
# ``with`` statement does not invoke ``__enter__`` a second time (which would
# acquire the lock again without a matching automatic release -- issue #37).
class AcquireReturnProxy:
    """A context-aware object that will release the lock file when exiting."""

    def __init__(self, lock: BaseFileLock) -> None:
        """:param lock: the already-acquired lock to release on context exit"""
        self.lock = lock

    def __enter__(self) -> BaseFileLock:
        """:return: the wrapped lock object (not this proxy)"""
        return self.lock

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        """Release the wrapped lock; exception details are not inspected."""
        self.lock.release()
49
+
50
+
51
@dataclass
class FileLockContext:
    """State shared by a ``BaseFileLock``; kept in a separate class so it can optionally be thread local."""

    # Holding the state in a plain dataclass (instead of attributes on the lock
    # itself) lets ThreadLocalFileContext swap in thread-local storage transparently.

    #: Path of the lock file on disk.
    lock_file: str

    #: Default timeout (seconds) used by ``acquire`` when no value is passed.
    timeout: float

    #: Permission bits applied when the lock file is created.
    mode: int

    #: Whether ``acquire`` blocks by default.
    blocking: bool

    #: File descriptor returned by ``os.open``; ``None`` whenever the lock is not held.
    lock_file_fd: int | None = None

    #: Re-entrancy counter: incremented on each acquire; the lock is only released when it drops back to 0.
    lock_counter: int = 0
75
+
76
+
77
class ThreadLocalFileContext(FileLockContext, local):
    """``FileLockContext`` variant whose state is stored per thread via ``threading.local``."""
79
+
80
+
81
class FileLockMeta(ABCMeta):
    """Metaclass implementing the optional per-path singleton behaviour of file locks."""

    def __call__(  # noqa: PLR0913
        cls,
        lock_file: str | os.PathLike[str],
        timeout: float = -1,
        mode: int = 0o644,
        thread_local: bool = True,  # noqa: FBT001, FBT002
        *,
        blocking: bool = True,
        is_singleton: bool = False,
        **kwargs: Any,  # capture remaining kwargs for subclasses # noqa: ANN401
    ) -> BaseFileLock:
        if is_singleton:
            existing = cls._instances.get(str(lock_file))  # type: ignore[attr-defined]
            if existing:
                # A singleton may only be re-requested with identical settings.
                expected = {
                    "thread_local": (thread_local, existing.is_thread_local()),
                    "timeout": (timeout, existing.timeout),
                    "mode": (mode, existing.mode),
                    "blocking": (blocking, existing.blocking),
                }
                mismatched = {name: pair for name, pair in expected.items() if pair[0] != pair[1]}
                if not mismatched:
                    return cast(BaseFileLock, existing)

                # Parameters do not match the existing singleton: refuse to proceed.
                msg = "Singleton lock instances cannot be initialized with differing arguments"
                msg += "\nNon-matching arguments: "
                for param_name, (passed_param, set_param) in mismatched.items():
                    msg += f"\n\t{param_name} (existing lock has {set_param} but {passed_param} was passed)"
                raise ValueError(msg)

        # Workaround to keep `__init__`'s params optional in subclasses, e.g. virtualenv
        # narrows the signature of `BaseFileLock.__init__` in its descendant
        # (https://github.com/tox-dev/filelock/pull/340): only forward the keyword
        # arguments the subclass actually declares.
        candidate_kwargs = {
            "timeout": timeout,
            "mode": mode,
            "thread_local": thread_local,
            "blocking": blocking,
            "is_singleton": is_singleton,
            **kwargs,
        }
        accepted = inspect.signature(cls.__init__).parameters  # type: ignore[misc]
        init_kwargs = {key: value for key, value in candidate_kwargs.items() if key in accepted}

        instance = super().__call__(lock_file, **init_kwargs)

        if is_singleton:
            cls._instances[str(lock_file)] = instance  # type: ignore[attr-defined]

        return cast(BaseFileLock, instance)
140
+
141
+
142
class BaseFileLock(contextlib.ContextDecorator, metaclass=FileLockMeta):
    """Abstract base class for a file lock object."""

    #: Per-subclass registry of singleton instances, keyed by lock-file path.
    _instances: WeakValueDictionary[str, BaseFileLock]

    def __init_subclass__(cls, **kwargs: dict[str, Any]) -> None:
        """Give every subclass its own (weak) singleton registry."""
        super().__init_subclass__(**kwargs)
        cls._instances = WeakValueDictionary()

    def __init__(  # noqa: PLR0913
        self,
        lock_file: str | os.PathLike[str],
        timeout: float = -1,
        mode: int = 0o644,
        thread_local: bool = True,  # noqa: FBT001, FBT002
        *,
        blocking: bool = True,
        is_singleton: bool = False,
    ) -> None:
        """
        Create a new lock object.

        :param lock_file: path to the file
        :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \
            the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \
            to a negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock.
        :param mode: file permissions for the lockfile
        :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \
            ``False`` then the lock will be reentrant across threads.
        :param blocking: whether the lock should be blocking or not
        :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \
            per lock file. This is useful if you want to use the lock object for reentrant locking without needing \
            to pass the same object around.

        """
        self._is_thread_local = thread_local
        self._is_singleton = is_singleton

        # External code must go through the properties below instead of touching the
        # context directly -- the context object may live in thread-local storage.
        context_kwargs: dict[str, Any] = {
            "lock_file": os.fspath(lock_file),
            "timeout": timeout,
            "mode": mode,
            "blocking": blocking,
        }
        context_cls = ThreadLocalFileContext if thread_local else FileLockContext
        self._context: FileLockContext = context_cls(**context_kwargs)

    def is_thread_local(self) -> bool:
        """:return: a flag indicating if this lock is thread local or not"""
        return self._is_thread_local

    @property
    def is_singleton(self) -> bool:
        """:return: a flag indicating if this lock is singleton or not"""
        return self._is_singleton

    @property
    def lock_file(self) -> str:
        """:return: path to the lock file"""
        return self._context.lock_file

    @property
    def timeout(self) -> float:
        """
        :return: the default timeout value, in seconds

        .. versionadded:: 2.0.0
        """
        return self._context.timeout

    @timeout.setter
    def timeout(self, value: float | str) -> None:
        """
        Change the default timeout value.

        :param value: the new value, in seconds

        """
        self._context.timeout = float(value)

    @property
    def blocking(self) -> bool:
        """:return: whether the locking is blocking or not"""
        return self._context.blocking

    @blocking.setter
    def blocking(self, value: bool) -> None:
        """
        Change the default blocking value.

        :param value: the new value as bool

        """
        self._context.blocking = value

    @property
    def mode(self) -> int:
        """:return: the file permissions for the lockfile"""
        return self._context.mode

    @abstractmethod
    def _acquire(self) -> None:
        """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file."""
        raise NotImplementedError

    @abstractmethod
    def _release(self) -> None:
        """Releases the lock and sets self._context.lock_file_fd to None."""
        raise NotImplementedError

    @property
    def is_locked(self) -> bool:
        """
        :return: A boolean indicating if the lock file is holding the lock currently.

        .. versionchanged:: 2.0.0

            This was previously a method and is now a property.
        """
        # The descriptor is only ever set while the OS-level lock is held.
        return self._context.lock_file_fd is not None

    @property
    def lock_counter(self) -> int:
        """:return: The number of times this lock has been acquired (but not yet released)."""
        return self._context.lock_counter

    def acquire(
        self,
        timeout: float | None = None,
        poll_interval: float = 0.05,
        *,
        poll_intervall: float | None = None,
        blocking: bool | None = None,
    ) -> AcquireReturnProxy:
        """
        Try to acquire the file lock.

        :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and
            if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired
        :param poll_interval: interval of trying to acquire the lock file
        :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead
        :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the
            first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired.
        :raises Timeout: if fails to acquire lock within the timeout period
        :return: a context object that will unlock the file when the context is exited

        .. code-block:: python

            # You can use this method in the context manager (recommended)
            with lock.acquire():
                pass

            # Or use an equivalent try-finally construct:
            lock.acquire()
            try:
                pass
            finally:
                lock.release()

        .. versionchanged:: 2.0.0

            This method returns now a *proxy* object instead of *self*,
            so that it can be used in a with statement without side effects.

        """
        # Fall back to the instance-level defaults where no explicit value was given.
        if timeout is None:
            timeout = self._context.timeout
        if blocking is None:
            blocking = self._context.blocking
        if poll_intervall is not None:
            msg = "use poll_interval instead of poll_intervall"
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            poll_interval = poll_intervall

        # Bump the re-entrancy counter up front; it is rolled back on failure below.
        self._context.lock_counter += 1

        lock_id, lock_filename = id(self), self.lock_file
        start_time = time.perf_counter()
        try:
            while True:
                if not self.is_locked:
                    _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename)
                    self._acquire()
                if self.is_locked:
                    _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename)
                    break
                if blocking is False:
                    _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename)
                    raise Timeout(lock_filename)  # noqa: TRY301
                if 0 <= timeout < time.perf_counter() - start_time:
                    _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename)
                    raise Timeout(lock_filename)  # noqa: TRY301
                msg = "Lock %s not acquired on %s, waiting %s seconds ..."
                _LOGGER.debug(msg, lock_id, lock_filename, poll_interval)
                time.sleep(poll_interval)
        except BaseException:  # roll back the counter on any failure, then re-raise
            self._context.lock_counter = max(0, self._context.lock_counter - 1)
            raise
        return AcquireReturnProxy(lock=self)

    def release(self, force: bool = False) -> None:  # noqa: FBT001, FBT002
        """
        Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0.
        Also note, that the lock file itself is not automatically deleted.

        :param force: If true, the lock counter is ignored and the lock is released in every case/

        """
        if not self.is_locked:
            return
        self._context.lock_counter -= 1

        if self._context.lock_counter == 0 or force:
            lock_id, lock_filename = id(self), self.lock_file

            _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename)
            self._release()
            self._context.lock_counter = 0
            _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename)

    def __enter__(self) -> Self:
        """
        Acquire the lock.

        :return: the lock object

        """
        self.acquire()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        """
        Release the lock.

        :param exc_type: the exception type if raised
        :param exc_value: the exception value if raised
        :param traceback: the exception traceback if raised

        """
        self.release()

    def __del__(self) -> None:
        """Force-release the OS-level lock when the object is garbage collected."""
        self.release(force=True)
398
+
399
+
400
+ __all__ = [
401
+ "AcquireReturnProxy",
402
+ "BaseFileLock",
403
+ ]
evalkit_tf437/lib/python3.10/site-packages/filelock/_error.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+
6
class Timeout(TimeoutError):  # noqa: N818
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file: str) -> None:
        """:param lock_file: path of the lock file that could not be acquired"""
        super().__init__()
        self._lock_file = lock_file

    @property
    def lock_file(self) -> str:
        """:return: The path of the file lock."""
        return self._lock_file

    def __reduce__(self) -> str | tuple[Any, ...]:
        # Pickling round-trips through __init__ with the stored path.
        return self.__class__, (self._lock_file,)

    def __str__(self) -> str:
        return f"The file lock '{self._lock_file}' could not be acquired."

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.lock_file!r})"
26
+
27
+
28
+ __all__ = [
29
+ "Timeout",
30
+ ]
evalkit_tf437/lib/python3.10/site-packages/filelock/_soft.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import sys
5
+ from contextlib import suppress
6
+ from errno import EACCES, EEXIST
7
+ from pathlib import Path
8
+
9
+ from ._api import BaseFileLock
10
+ from ._util import ensure_directory_exists, raise_on_not_writable_file
11
+
12
+
13
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self) -> None:
        """Atomically create the lock file, leaving ``lock_file_fd`` set on success."""
        # Check exists + read-only up front: the exclusive open below would
        # otherwise mask both cases as EEXIST.
        raise_on_not_writable_file(self.lock_file)
        ensure_directory_exists(self.lock_file)
        open_mode = (
            os.O_WRONLY  # open for writing only
            | os.O_CREAT
            | os.O_EXCL  # combined with O_CREAT: raise EEXIST if the file already exists
            | os.O_TRUNC  # truncate the file to zero bytes
        )
        try:
            fd = os.open(self.lock_file, open_mode, self._context.mode)
        except OSError as exception:
            expected = exception.errno == EEXIST or (  # lock already exists
                exception.errno == EACCES and sys.platform == "win32"  # has no access to this lock
            )
            if not expected:  # pragma: win32 no cover
                raise  # unexpected failure -- propagate
        else:
            self._context.lock_file_fd = fd

    def _release(self) -> None:
        """Close the descriptor and best-effort delete the lock file."""
        assert self._context.lock_file_fd is not None  # noqa: S101
        os.close(self._context.lock_file_fd)
        self._context.lock_file_fd = None
        with suppress(OSError):  # someone else may already have removed it, which is fine
            Path(self.lock_file).unlink()
43
+
44
+
45
+ __all__ = [
46
+ "SoftFileLock",
47
+ ]
evalkit_tf437/lib/python3.10/site-packages/filelock/_unix.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import sys
5
+ from contextlib import suppress
6
+ from errno import ENOSYS
7
+ from pathlib import Path
8
+ from typing import cast
9
+
10
+ from ._api import BaseFileLock
11
+ from ._util import ensure_directory_exists
12
+
13
+ #: a flag to indicate if the fcntl API is available
14
+ has_fcntl = False
15
+ if sys.platform == "win32": # pragma: win32 cover
16
+
17
+ class UnixFileLock(BaseFileLock):
18
+ """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
19
+
20
+ def _acquire(self) -> None:
21
+ raise NotImplementedError
22
+
23
+ def _release(self) -> None:
24
+ raise NotImplementedError
25
+
26
+ else: # pragma: win32 no cover
27
+ try:
28
+ import fcntl
29
+ except ImportError:
30
+ pass
31
+ else:
32
+ has_fcntl = True
33
+
34
+ class UnixFileLock(BaseFileLock):
35
+ """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
36
+
37
+ def _acquire(self) -> None:
38
+ ensure_directory_exists(self.lock_file)
39
+ open_flags = os.O_RDWR | os.O_TRUNC
40
+ if not Path(self.lock_file).exists():
41
+ open_flags |= os.O_CREAT
42
+ fd = os.open(self.lock_file, open_flags, self._context.mode)
43
+ with suppress(PermissionError): # This locked is not owned by this UID
44
+ os.fchmod(fd, self._context.mode)
45
+ try:
46
+ fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
47
+ except OSError as exception:
48
+ os.close(fd)
49
+ if exception.errno == ENOSYS: # NotImplemented error
50
+ msg = "FileSystem does not appear to support flock; use SoftFileLock instead"
51
+ raise NotImplementedError(msg) from exception
52
+ else:
53
+ self._context.lock_file_fd = fd
54
+
55
+ def _release(self) -> None:
56
+ # Do not remove the lockfile:
57
+ # https://github.com/tox-dev/py-filelock/issues/31
58
+ # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
59
+ fd = cast(int, self._context.lock_file_fd)
60
+ self._context.lock_file_fd = None
61
+ fcntl.flock(fd, fcntl.LOCK_UN)
62
+ os.close(fd)
63
+
64
+
65
+ __all__ = [
66
+ "UnixFileLock",
67
+ "has_fcntl",
68
+ ]
evalkit_tf437/lib/python3.10/site-packages/filelock/_util.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import stat
5
+ import sys
6
+ from errno import EACCES, EISDIR
7
+ from pathlib import Path
8
+
9
+
10
def raise_on_not_writable_file(filename: str) -> None:
    """
    Raise an exception if attempting to open the file for writing would fail.

    This lets callers distinguish files that can never be written from files that are
    writable but currently locked.

    :param filename: file to check
    :raises OSError: as if the file was opened for writing.

    """
    try:  # a single stat combines the exists + writable checks without a race condition
        file_stat = os.stat(filename)  # noqa: PTH116
    except OSError:
        return  # missing file (or other stat failure): nothing to report

    if file_stat.st_mtime == 0:
        # os.stat succeeded but the modification time is zero -- treat as an
        # invalid stat result and ignore it.
        return

    if not (file_stat.st_mode & stat.S_IWUSR):
        raise PermissionError(EACCES, "Permission denied", filename)

    if stat.S_ISDIR(file_stat.st_mode):
        if sys.platform == "win32":  # pragma: win32 cover
            # Windows reports opening a directory for writing as a permission error.
            raise PermissionError(EACCES, "Permission denied", filename)
        # POSIX reports it as IsADirectoryError instead.
        raise IsADirectoryError(EISDIR, "Is a directory", filename)
37
+
38
+
39
def ensure_directory_exists(filename: Path | str) -> None:
    """
    Ensure the directory containing the file exists (create it if necessary).

    :param filename: path of the file whose parent directory should exist

    """
    # ``parents=True`` + ``exist_ok=True`` make this an idempotent ``mkdir -p``.
    Path(filename).parent.mkdir(parents=True, exist_ok=True)
47
+
48
+
49
+ __all__ = [
50
+ "ensure_directory_exists",
51
+ "raise_on_not_writable_file",
52
+ ]
evalkit_tf437/lib/python3.10/site-packages/filelock/_windows.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import sys
5
+ from contextlib import suppress
6
+ from errno import EACCES
7
+ from pathlib import Path
8
+ from typing import cast
9
+
10
+ from ._api import BaseFileLock
11
+ from ._util import ensure_directory_exists, raise_on_not_writable_file
12
+
13
if sys.platform == "win32":  # pragma: win32 cover
    import msvcrt

    class WindowsFileLock(BaseFileLock):
        """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

        def _acquire(self) -> None:
            """Open the lock file and take a non-blocking one-byte range lock on it."""
            raise_on_not_writable_file(self.lock_file)
            ensure_directory_exists(self.lock_file)
            open_mode = (
                os.O_RDWR  # open for read and write
                | os.O_CREAT  # create file if not exists
                | os.O_TRUNC  # truncate file if not empty
            )
            try:
                fd = os.open(self.lock_file, open_mode, self._context.mode)
            except OSError as exception:
                if exception.errno != EACCES:  # has no access to this lock
                    raise
            else:
                try:
                    msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
                except OSError as exception:
                    os.close(fd)  # close file first
                    if exception.errno != EACCES:  # file is already locked
                        raise
                else:
                    self._context.lock_file_fd = fd

        def _release(self) -> None:
            """Unlock the byte range, close the descriptor and best-effort delete the file."""
            fd = cast(int, self._context.lock_file_fd)
            self._context.lock_file_fd = None
            msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
            os.close(fd)

            with suppress(OSError):  # another instance of the application may have acquired the file lock
                Path(self.lock_file).unlink()

else:  # pragma: win32 no cover

    class WindowsFileLock(BaseFileLock):
        """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

        def _acquire(self) -> None:
            raise NotImplementedError

        def _release(self) -> None:
            raise NotImplementedError
+ raise NotImplementedError
61
+
62
+
63
+ __all__ = [
64
+ "WindowsFileLock",
65
+ ]
evalkit_tf437/lib/python3.10/site-packages/filelock/asyncio.py ADDED
@@ -0,0 +1,342 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """An asyncio-based implementation of the file lock.""" # noqa: A005
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import contextlib
7
+ import logging
8
+ import os
9
+ import time
10
+ from dataclasses import dataclass
11
+ from threading import local
12
+ from typing import TYPE_CHECKING, Any, Callable, NoReturn, cast
13
+
14
+ from ._api import BaseFileLock, FileLockContext, FileLockMeta
15
+ from ._error import Timeout
16
+ from ._soft import SoftFileLock
17
+ from ._unix import UnixFileLock
18
+ from ._windows import WindowsFileLock
19
+
20
+ if TYPE_CHECKING:
21
+ import sys
22
+ from concurrent import futures
23
+ from types import TracebackType
24
+
25
+ if sys.version_info >= (3, 11): # pragma: no cover (py311+)
26
+ from typing import Self
27
+ else: # pragma: no cover (<py311)
28
+ from typing_extensions import Self
29
+
30
+
31
+ _LOGGER = logging.getLogger("filelock")
32
+
33
+
34
@dataclass
class AsyncFileLockContext(FileLockContext):
    """A dataclass which holds the context for a ``BaseAsyncFileLock`` object."""

    #: When True, blocking file operations are dispatched to an executor.
    run_in_executor: bool = True

    #: Executor used for the file operations (``None`` -> the loop's default executor).
    executor: futures.Executor | None = None

    #: Event loop to use; ``None`` means the running loop is looked up on demand.
    loop: asyncio.AbstractEventLoop | None = None
46
+
47
+
48
+ class AsyncThreadLocalFileContext(AsyncFileLockContext, local):
49
+ """A thread local version of the ``FileLockContext`` class."""
50
+
51
+
52
class AsyncAcquireReturnProxy:
    """A context-aware object that will release the lock file when exiting."""

    def __init__(self, lock: BaseAsyncFileLock) -> None:
        """:param lock: the already-acquired async lock to release on context exit"""
        self.lock = lock

    async def __aenter__(self) -> BaseAsyncFileLock:
        """:return: the wrapped lock object (not this proxy)"""
        return self.lock

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        """Release the wrapped lock; exception details are not inspected."""
        await self.lock.release()
68
+
69
+
70
class AsyncFileLockMeta(FileLockMeta):
    """Metaclass for async locks: validates async-only options before delegating to ``FileLockMeta``."""

    def __call__(  # type: ignore[override] # noqa: PLR0913
        cls,  # noqa: N805
        lock_file: str | os.PathLike[str],
        timeout: float = -1,
        mode: int = 0o644,
        thread_local: bool = False,  # noqa: FBT001, FBT002
        *,
        blocking: bool = True,
        is_singleton: bool = False,
        loop: asyncio.AbstractEventLoop | None = None,
        run_in_executor: bool = True,
        executor: futures.Executor | None = None,
    ) -> BaseAsyncFileLock:
        # Thread-local state cannot be shared with executor threads, so the combination is rejected.
        if thread_local and run_in_executor:
            msg = "run_in_executor is not supported when thread_local is True"
            raise ValueError(msg)
        created = super().__call__(
            lock_file=lock_file,
            timeout=timeout,
            mode=mode,
            thread_local=thread_local,
            blocking=blocking,
            is_singleton=is_singleton,
            loop=loop,
            run_in_executor=run_in_executor,
            executor=executor,
        )
        return cast(BaseAsyncFileLock, created)
99
+
100
+
101
+ class BaseAsyncFileLock(BaseFileLock, metaclass=AsyncFileLockMeta):
102
+ """Base class for asynchronous file locks."""
103
+
104
+ def __init__( # noqa: PLR0913
105
+ self,
106
+ lock_file: str | os.PathLike[str],
107
+ timeout: float = -1,
108
+ mode: int = 0o644,
109
+ thread_local: bool = False, # noqa: FBT001, FBT002
110
+ *,
111
+ blocking: bool = True,
112
+ is_singleton: bool = False,
113
+ loop: asyncio.AbstractEventLoop | None = None,
114
+ run_in_executor: bool = True,
115
+ executor: futures.Executor | None = None,
116
+ ) -> None:
117
+ """
118
+ Create a new lock object.
119
+
120
+ :param lock_file: path to the file
121
+ :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \
122
+ the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \
123
+ to a negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock.
124
+ :param mode: file permissions for the lockfile
125
+ :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \
126
+ ``False`` then the lock will be reentrant across threads.
127
+ :param blocking: whether the lock should be blocking or not
128
+ :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \
129
+ per lock file. This is useful if you want to use the lock object for reentrant locking without needing \
130
+ to pass the same object around.
131
+ :param loop: The event loop to use. If not specified, the running event loop will be used.
132
+ :param run_in_executor: If this is set to ``True`` then the lock will be acquired in an executor.
133
+ :param executor: The executor to use. If not specified, the default executor will be used.
134
+
135
+ """
136
+ self._is_thread_local = thread_local
137
+ self._is_singleton = is_singleton
138
+
139
+ # Create the context. Note that external code should not work with the context directly and should instead use
140
+ # properties of this class.
141
+ kwargs: dict[str, Any] = {
142
+ "lock_file": os.fspath(lock_file),
143
+ "timeout": timeout,
144
+ "mode": mode,
145
+ "blocking": blocking,
146
+ "loop": loop,
147
+ "run_in_executor": run_in_executor,
148
+ "executor": executor,
149
+ }
150
+ self._context: AsyncFileLockContext = (AsyncThreadLocalFileContext if thread_local else AsyncFileLockContext)(
151
+ **kwargs
152
+ )
153
+
154
+ @property
155
+ def run_in_executor(self) -> bool:
156
+ """::return: whether run in executor."""
157
+ return self._context.run_in_executor
158
+
159
+ @property
160
+ def executor(self) -> futures.Executor | None:
161
+ """::return: the executor."""
162
+ return self._context.executor
163
+
164
+ @executor.setter
165
+ def executor(self, value: futures.Executor | None) -> None: # pragma: no cover
166
+ """
167
+ Change the executor.
168
+
169
+ :param value: the new executor or ``None``
170
+ :type value: futures.Executor | None
171
+
172
+ """
173
+ self._context.executor = value
174
+
175
+ @property
176
+ def loop(self) -> asyncio.AbstractEventLoop | None:
177
+ """::return: the event loop."""
178
+ return self._context.loop
179
+
180
+ async def acquire( # type: ignore[override]
181
+ self,
182
+ timeout: float | None = None,
183
+ poll_interval: float = 0.05,
184
+ *,
185
+ blocking: bool | None = None,
186
+ ) -> AsyncAcquireReturnProxy:
187
+ """
188
+ Try to acquire the file lock.
189
+
190
+ :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default
191
+ :attr:`~BaseFileLock.timeout` is and if ``timeout < 0``, there is no timeout and
192
+ this method will block until the lock could be acquired
193
+ :param poll_interval: interval of trying to acquire the lock file
194
+ :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the
195
+ first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired.
196
+ :raises Timeout: if fails to acquire lock within the timeout period
197
+ :return: a context object that will unlock the file when the context is exited
198
+
199
+ .. code-block:: python
200
+
201
+ # You can use this method in the context manager (recommended)
202
+ with lock.acquire():
203
+ pass
204
+
205
+ # Or use an equivalent try-finally construct:
206
+ lock.acquire()
207
+ try:
208
+ pass
209
+ finally:
210
+ lock.release()
211
+
212
+ """
213
+ # Use the default timeout, if no timeout is provided.
214
+ if timeout is None:
215
+ timeout = self._context.timeout
216
+
217
+ if blocking is None:
218
+ blocking = self._context.blocking
219
+
220
+ # Increment the number right at the beginning. We can still undo it, if something fails.
221
+ self._context.lock_counter += 1
222
+
223
+ lock_id = id(self)
224
+ lock_filename = self.lock_file
225
+ start_time = time.perf_counter()
226
+ try:
227
+ while True:
228
+ if not self.is_locked:
229
+ _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename)
230
+ await self._run_internal_method(self._acquire)
231
+ if self.is_locked:
232
+ _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename)
233
+ break
234
+ if blocking is False:
235
+ _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename)
236
+ raise Timeout(lock_filename) # noqa: TRY301
237
+ if 0 <= timeout < time.perf_counter() - start_time:
238
+ _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename)
239
+ raise Timeout(lock_filename) # noqa: TRY301
240
+ msg = "Lock %s not acquired on %s, waiting %s seconds ..."
241
+ _LOGGER.debug(msg, lock_id, lock_filename, poll_interval)
242
+ await asyncio.sleep(poll_interval)
243
+ except BaseException: # Something did go wrong, so decrement the counter.
244
+ self._context.lock_counter = max(0, self._context.lock_counter - 1)
245
+ raise
246
+ return AsyncAcquireReturnProxy(lock=self)
247
+
248
+ async def release(self, force: bool = False) -> None: # type: ignore[override] # noqa: FBT001, FBT002
249
+ """
250
+ Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0.
251
+ Also note, that the lock file itself is not automatically deleted.
252
+
253
+ :param force: If true, the lock counter is ignored and the lock is released in every case/
254
+
255
+ """
256
+ if self.is_locked:
257
+ self._context.lock_counter -= 1
258
+
259
+ if self._context.lock_counter == 0 or force:
260
+ lock_id, lock_filename = id(self), self.lock_file
261
+
262
+ _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename)
263
+ await self._run_internal_method(self._release)
264
+ self._context.lock_counter = 0
265
+ _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename)
266
+
267
+ async def _run_internal_method(self, method: Callable[[], Any]) -> None:
268
+ if asyncio.iscoroutinefunction(method):
269
+ await method()
270
+ elif self.run_in_executor:
271
+ loop = self.loop or asyncio.get_running_loop()
272
+ await loop.run_in_executor(self.executor, method)
273
+ else:
274
+ method()
275
+
276
+ def __enter__(self) -> NoReturn:
277
+ """
278
+ Replace old __enter__ method to avoid using it.
279
+
280
+ NOTE: DO NOT USE `with` FOR ASYNCIO LOCKS, USE `async with` INSTEAD.
281
+
282
+ :return: none
283
+ :rtype: NoReturn
284
+ """
285
+ msg = "Do not use `with` for asyncio locks, use `async with` instead."
286
+ raise NotImplementedError(msg)
287
+
288
+ async def __aenter__(self) -> Self:
289
+ """
290
+ Acquire the lock.
291
+
292
+ :return: the lock object
293
+
294
+ """
295
+ await self.acquire()
296
+ return self
297
+
298
+ async def __aexit__(
299
+ self,
300
+ exc_type: type[BaseException] | None,
301
+ exc_value: BaseException | None,
302
+ traceback: TracebackType | None,
303
+ ) -> None:
304
+ """
305
+ Release the lock.
306
+
307
+ :param exc_type: the exception type if raised
308
+ :param exc_value: the exception value if raised
309
+ :param traceback: the exception traceback if raised
310
+
311
+ """
312
+ await self.release()
313
+
314
+ def __del__(self) -> None:
315
+ """Called when the lock object is deleted."""
316
+ with contextlib.suppress(RuntimeError):
317
+ loop = self.loop or asyncio.get_running_loop()
318
+ if not loop.is_running(): # pragma: no cover
319
+ loop.run_until_complete(self.release(force=True))
320
+ else:
321
+ loop.create_task(self.release(force=True))
322
+
323
+
324
class AsyncSoftFileLock(SoftFileLock, BaseAsyncFileLock):
    """Async lock that merely watches the existence of the lock file."""
326
+
327
+
328
class AsyncUnixFileLock(UnixFileLock, BaseAsyncFileLock):
    """Async lock backed by :func:`fcntl.flock` to hard lock the lock file on unix systems."""
330
+
331
+
332
class AsyncWindowsFileLock(WindowsFileLock, BaseAsyncFileLock):
    """Async lock backed by :func:`msvcrt.locking` to hard lock the lock file on windows systems."""
334
+
335
+
336
# Public API of this module, kept in alphabetical order.
__all__ = [
    "AsyncAcquireReturnProxy",
    "AsyncSoftFileLock",
    "AsyncUnixFileLock",
    "AsyncWindowsFileLock",
    "BaseAsyncFileLock",
]
evalkit_tf437/lib/python3.10/site-packages/filelock/py.typed ADDED
File without changes
evalkit_tf437/lib/python3.10/site-packages/filelock/version.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# file generated by setuptools_scm
# don't change, don't track in version control
# TYPE_CHECKING is hard-coded to False so the `typing` import below is never
# executed at runtime; static type checkers still analyse the True branch.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple, Union
    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    # Runtime placeholder for the alias used in the annotations below.
    VERSION_TUPLE = object

# Forward declarations for type checkers; the values are assigned below.
version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

__version__ = version = '3.16.1'
__version_tuple__ = version_tuple = (3, 16, 1)
evalkit_tf437/lib/python3.10/site-packages/pip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (618 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/pip/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (452 Bytes). View file