ZTWHHH committed on
Commit
a0b1550
·
verified ·
1 Parent(s): 013cf23

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llava_next/lib/python3.10/site-packages/transformers/models/clap/__init__.py +76 -0
  2. llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc +0 -0
  4. llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc +0 -0
  5. llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc +0 -0
  6. llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc +0 -0
  7. llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc +0 -0
  8. llava_next/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py +363 -0
  9. llava_next/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py +117 -0
  10. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py +166 -0
  11. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc +0 -0
  13. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc +0 -0
  14. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc +0 -0
  15. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc +0 -0
  16. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc +0 -0
  17. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc +0 -0
  18. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py +155 -0
  19. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py +1392 -0
  20. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py +895 -0
  21. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py +1145 -0
  22. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py +553 -0
  23. llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py +231 -0
  24. llava_next/lib/python3.10/site-packages/transformers/models/phobert/__init__.py +29 -0
  25. llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/tokenization_phobert.cpython-310.pyc +0 -0
  27. llava_next/lib/python3.10/site-packages/transformers/models/phobert/tokenization_phobert.py +367 -0
  28. llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc +0 -0
  30. llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc +0 -0
  31. llava_next/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py +666 -0
  32. llava_next/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_original_to_hf_format.cpython-310.pyc +0 -0
  33. llava_next/lib/python3.10/site-packages/transformers/models/sew/__init__.py +56 -0
  34. llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/__init__.cpython-310.pyc +0 -0
  35. llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/configuration_sew.cpython-310.pyc +0 -0
  36. llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/convert_sew_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  37. llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/modeling_sew.cpython-310.pyc +0 -0
  38. llava_next/lib/python3.10/site-packages/transformers/models/sew/configuration_sew.py +258 -0
  39. llava_next/lib/python3.10/site-packages/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py +306 -0
  40. llava_next/lib/python3.10/site-packages/transformers/models/sew/modeling_sew.py +1230 -0
  41. llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +108 -0
  42. llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +604 -0
  43. llava_next/lib/python3.10/site-packages/transformers/models/videomae/__init__.py +75 -0
  44. llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc +0 -0
  46. llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/convert_videomae_to_pytorch.cpython-310.pyc +0 -0
  47. llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc +0 -0
  48. llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc +0 -0
  49. llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc +0 -0
  50. llava_next/lib/python3.10/site-packages/transformers/models/videomae/configuration_videomae.py +149 -0
llava_next/lib/python3.10/site-packages/transformers/models/clap/__init__.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> list of public names it exposes. `_LazyModule`
# consumes this mapping to defer each submodule import until one of its
# attributes is first accessed.
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

# The modeling and feature-extraction submodules require torch; register them
# only when torch is importable so a torch-less install can still import the
# config/processor pieces.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    # Static type checkers follow the real imports; this branch is never taken
    # at runtime (the module is replaced by the lazy proxy below instead).
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )


else:
    import sys

    # Replace this module object in sys.modules with a lazy proxy that imports
    # submodules on demand, keeping `import transformers` cheap.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (3.34 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc ADDED
Binary file (66.8 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc ADDED
Binary file (5.25 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for CLAP."""
16
+
17
+
18
+ import copy
19
+ from typing import Any, Dict, List, Optional, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+
24
+ from ...audio_utils import mel_filter_bank, spectrogram, window_function
25
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
26
+ from ...feature_extraction_utils import BatchFeature
27
+ from ...utils import TensorType, logging
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLAP feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time
    Fourier Transform* (STFT) which should match pytorch's `torch.stft` equivalent.

    Args:
        feature_size (`int`, *optional*, defaults to 64):
            The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters
            (`n_mels`).
        sampling_rate (`int`, *optional*, defaults to 48000):
            The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). This only serves
            to warn users if the audio fed to the feature extractor does not have the same sampling rate.
        hop_length (`int`, *optional*, defaults to 480):
            Length of the overlapping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split
            in smaller `frames` with a step of `hop_length` between each frame.
        max_length_s (`int`, *optional*, defaults to 10):
            The maximum input length of the model in seconds. This is used to pad the audio.
        fft_window_size (`int`, *optional*, defaults to 1024):
            Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency
            resolution of the spectrogram. 400 means that the Fourier transform is computed on windows of 400 samples.
        padding_value (`float`, *optional*, defaults to 0.0):
            Padding value used to pad the audio. Should correspond to silences.
        return_attention_mask (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the attention masks corresponding to the input.
        frequency_min (`float`, *optional*, defaults to 0):
            The lowest frequency of interest. The STFT will not be computed for values below this.
        frequency_max (`float`, *optional*, defaults to 14000):
            The highest frequency of interest. The STFT will not be computed for values above this.
        top_db (`float`, *optional*):
            The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the
            `audio_utils.power_to_db` function
        truncation (`str`, *optional*, defaults to `"fusion"`):
            Truncation pattern for long audio inputs. Two patterns are available:
                - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a
                  downsampled version of the entire mel spectrogram.
                  If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a
                  copy of the original mel obtained from the padded audio.
                - `rand_trunc` will select a random crop of the mel spectrogram.
        padding (`str`, *optional*, defaults to `"repeatpad"`):
            Padding pattern for shorter audio inputs. Three patterns were originally implemented:
                - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
                - `repeat`: the audio is repeated and then cut to fit the `max_length`
                - `pad`: the audio is padded.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of frequency bins of a real FFT: window_size // 2 + 1.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        # Maximum number of raw samples the model accepts (seconds * rate).
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two filter banks: HTK-style (torchaudio defaults, used for "fusion")
        # and Slaney-style (librosa defaults, used otherwise).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the
            mel filter banks, which do not need to be saved or printed as they are too long.
        """
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        # The filter banks are large arrays and fully reconstructible from the
        # other hyper-parameters, so they are dropped from the serialization.
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """
        Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter
        banks are used depending on the truncation pattern:
            - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from
              calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation`
              is set to `"fusion"`.
            - `self.mel_filters_slaney` : they correspond to the default parameters of `librosa` which used
              `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original
              implementation when the truncation mode is not `"fusion"`.
        """
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        # Transpose so the output is (num_frames, num_mel_bins).
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a downsampled full mel with 3 random chunks (front/middle/back thirds) into a (4, chunk_frames, n_mels) array."""
        # Split the valid chunk start positions into 3 consecutive ranges.
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        # Downsample the full spectrogram to chunk_frames frames via bilinear
        # interpolation. NOTE(review): the target mel dimension 64 is hard-coded
        # and matches only the default `feature_size=64` — confirm for other sizes.
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """
        Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments.
        Four different path are possible:
            - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram
              will be computed on the entire audio. 3 random crops and a downsampled version of the full mel spectrogram
              are then stacked together. They will later be used for `feature_fusion`.
            - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is
              padded based on `padding`.
            - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded
              based on `padding`, and is repeated `4` times.
            - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel
              spectrogram will be computed on a random crop of the waveform.

        Returns a tuple `(input_mel, longer)` where `longer` flags whether the audio exceeded `max_length`.
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")

        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    # Tile and cut: repeat the audio enough times, then trim to max_length.
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.tile(waveform, n_repeat + 1)[:max_length]
                if padding == "repeatpad":
                    # Tile without overshoot, then zero-pad the remainder.
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.tile(waveform, n_repeat)
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                # Shorter audios still return 4 mels for fusion: 4 copies of the padded mel.
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).

        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
                stereo, i.e. single float per timestep.
            truncation (`str`, *optional*):
                Truncation pattern for long audio inputs. Two patterns are available:
                    - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and
                      a downsampled version of the entire mel spectrogram.
                      If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be
                      a copy of the original mel obtained from the padded audio.
                    - `rand_trunc` will select a random crop of the mel spectrogram.
            padding (`str`, *optional*):
                Padding pattern for shorter audio inputs. Three patterns were originally implemented:
                    - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
                    - `repeat`: the audio is repeated and then cut to fit the `max_length`
                    - `pad`: the audio is padded.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
                pipeline.
        """
        # Fall back to the defaults configured at construction time.
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            # NOTE(review): this condition looks inverted — it converts arrays that
            # are already float64 (a no-op) and leaves other dtypes untouched.
            # Presumably the intent was `is not np.dtype(np.float64)` — confirm upstream.
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
llava_next/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Audio/Text processor class for CLAP
17
+ """
18
+
19
+ from ...processing_utils import ProcessorMixin
20
+ from ...tokenization_utils_base import BatchEncoding
21
+
22
+
23
class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBerta tokenizer into a single processor.

    [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
    [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.

    Args:
        feature_extractor ([`ClapFeatureExtractor`]):
            The audio processor is a required input.
        tokenizer ([`RobertaTokenizerFast`]):
            The tokenizer is a required input.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """
        Prepare one or several text sequence(s) and/or audio(s) for the model.

        `text` (together with `kwargs`) is forwarded to the tokenizer's `__call__`; `audios` (together with `kwargs`)
        is forwarded to [`ClapFeatureExtractor.__call__`]. Refer to the docstrings of those two methods for details.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The audio or batch of audios to be prepared. Each audio can be a NumPy array or PyTorch tensor. In
                case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of
                channels, and T the sample length of the audio.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is
              not `None`).
            - **input_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`.
        """
        # `sampling_rate` is consumed here so it is only passed to the feature extractor.
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        text_encoding = None
        if text is not None:
            text_encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        audio_encoding = None
        if audios is not None:
            audio_encoding = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        # Audio-only call: wrap the feature-extractor output in a BatchEncoding.
        if text_encoding is None:
            return BatchEncoding(data=dict(**audio_encoding), tensor_type=return_tensors)

        # Text (optionally with audio): attach the audio features to the tokenizer output.
        if audio_encoding is not None:
            text_encoding["input_features"] = audio_encoding.input_features
        return text_encoding

    def batch_decode(self, *args, **kwargs):
        """
        Forward all arguments to the tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Refer to its docstring for
        more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        Forward all arguments to the tokenizer's [`~PreTrainedTokenizer.decode`]. Refer to its docstring for more
        information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Order-preserving, de-duplicated union of tokenizer and feature-extractor input names.
        combined = self.tokenizer.model_input_names + self.feature_extractor.model_input_names
        return list(dict.fromkeys(combined))
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Objects that are always importable, no matter which optional backends are installed.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

# Fast tokenizer: only registered when the optional `tokenizers` package is available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

# PyTorch models: only registered when `torch` is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

# TensorFlow models: only registered when `tensorflow` is available.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

# Flax models: only registered when `flax` is available.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports for static type checkers / IDEs; this branch must mirror
    # `_import_structure` above exactly, guard for guard.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this package module with a lazy proxy: submodules are only
    # imported when one of their attributes is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.46 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc ADDED
Binary file (6.38 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc ADDED
Binary file (41.4 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc ADDED
Binary file (22.9 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc ADDED
Binary file (36 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc ADDED
Binary file (18.1 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc ADDED
Binary file (8.41 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DistilBERT model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
28
+ "distilbert-base-uncased-distilled-squad": (
29
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
30
+ ),
31
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
32
+ "distilbert-base-cased-distilled-squad": (
33
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
34
+ ),
35
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
36
+ "distilbert-base-multilingual-cased": (
37
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
38
+ ),
39
+ "distilbert-base-uncased-finetuned-sst-2-english": (
40
+ "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
41
+ ),
42
+ }
43
+
44
+
45
class DistilBertConfig(PretrainedConfig):
    r"""
    Configuration class for [`DistilBertModel`] and [`TFDistilBertModel`]. It stores every
    hyper-parameter needed to instantiate a DistilBERT model; the default values reproduce the
    [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size, i.e. the number of distinct token ids accepted by the `inputs_ids` passed
            when calling [`DistilBertModel`] or [`TFDistilBertModel`].
        max_position_embeddings (`int`, *optional*, defaults to 512):
            Largest sequence length the model can ever be used with; usually set generously
            (e.g. 512, 1024 or 2048).
        sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
            Whether to use fixed sinusoidal positional embeddings instead of learned ones.
        n_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        n_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        dim (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_dim (`int`, *optional*, defaults to 3072):
            Size of the "intermediate" (often called feed-forward) layer inside each encoder block.
        dropout (`float`, *optional*, defaults to 0.1):
            Dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            Dropout ratio applied to the attention probabilities.
        activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            Non-linear activation function (function or string) in the encoder and pooler. If a string,
            `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        qa_dropout (`float`, *optional*, defaults to 0.1):
            Dropout probability used in the question answering model [`DistilBertForQuestionAnswering`].
        seq_classif_dropout (`float`, *optional*, defaults to 0.2):
            Dropout probability used in the sequence classification and the multiple choice model
            [`DistilBertForSequenceClassification`].

    Examples:

    ```python
    >>> from transformers import DistilBertConfig, DistilBertModel

    >>> # Initializing a DistilBERT configuration
    >>> configuration = DistilBertConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = DistilBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "distilbert"
    # Aliases from the canonical `PretrainedConfig` attribute names onto DistilBERT's
    # historical hyper-parameter names.
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        # Embedding / sizing hyper-parameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        # Regularization and initialization.
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        # Head-specific dropouts.
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        # Base-class init last, so it can see/override the attributes set above.
        super().__init__(**kwargs, pad_token_id=pad_token_id)
141
+
142
+
143
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic (symbolic) axes of the ONNX graph inputs, keyed by input name."""
        multiple_choice = self.task == "multiple-choice"
        dynamic_axis = (
            {0: "batch", 1: "choice", 2: "sequence"} if multiple_choice else {0: "batch", 1: "sequence"}
        )
        return OrderedDict((name, dynamic_axis) for name in ("input_ids", "attention_mask"))
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py ADDED
@@ -0,0 +1,1392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
18
+ part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
19
+ """
20
+
21
+
22
+ import math
23
+ from typing import Dict, List, Optional, Set, Tuple, Union
24
+
25
+ import numpy as np
26
+ import torch
27
+ import torch.nn.functional as F
28
+ from torch import nn
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from ...activations import get_activation
32
+ from ...configuration_utils import PretrainedConfig
33
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
34
+ from ...modeling_outputs import (
35
+ BaseModelOutput,
36
+ MaskedLMOutput,
37
+ MultipleChoiceModelOutput,
38
+ QuestionAnsweringModelOutput,
39
+ SequenceClassifierOutput,
40
+ TokenClassifierOutput,
41
+ )
42
+ from ...modeling_utils import PreTrainedModel
43
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
44
+ from ...utils import (
45
+ add_code_sample_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ is_flash_attn_2_available,
49
+ is_flash_attn_greater_or_equal_2_10,
50
+ logging,
51
+ replace_return_docstrings,
52
+ )
53
+ from .configuration_distilbert import DistilBertConfig
54
+
55
+
56
+ if is_flash_attn_2_available():
57
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
58
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
59
+
60
+
61
logger = logging.get_logger(__name__)
# Default checkpoint and config identifiers referenced by the auto-generated docstrings.
_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
_CONFIG_FOR_DOC = "DistilBertConfig"

# Canonical DistilBERT checkpoints hosted on the Hugging Face Hub.
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "distilbert-base-uncased",
    "distilbert-base-uncased-distilled-squad",
    "distilbert-base-cased",
    "distilbert-base-cased-distilled-squad",
    "distilbert-base-german-cased",
    "distilbert-base-multilingual-cased",
    "distilbert-base-uncased-finetuned-sst-2-english",
    # See all DistilBERT models at https://huggingface.co/models?filter=distilbert
]
75
+
76
+
77
+ # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
78
+
79
+
80
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
81
+ def _get_unpad_data(attention_mask):
82
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
83
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
84
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
85
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
86
+ return (
87
+ indices,
88
+ cu_seqlens,
89
+ max_seqlen_in_batch,
90
+ )
91
+
92
+
93
def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
    """Fill `out` in place with fixed sinusoidal position embeddings.

    Under DeepSpeed ZeRO-3 the parameter is sharded across ranks, so it must first be
    gathered, and only rank 0 is allowed to write it (modifier_rank=0).
    """
    if not is_deepspeed_zero3_enabled():
        _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
        return

    import deepspeed

    with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
        if torch.distributed.get_rank() == 0:
            _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
102
+
103
+
104
+ def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
105
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
106
+ out.requires_grad = False
107
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
108
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
109
+ out.detach_()
110
+
111
+
112
class Embeddings(nn.Module):
    """Token + absolute-position embeddings, followed by LayerNorm and dropout.

    Unlike BERT, DistilBERT has no token-type (segment) embeddings.
    """

    def __init__(self, config: "PretrainedConfig"):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
        if config.sinusoidal_pos_embds:
            # Overwrite the learned position table with a fixed sinusoidal one.
            create_sinusoidal_embeddings(
                n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight
            )

        self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
        self.dropout = nn.Dropout(config.dropout)
        # Pre-built position ids kept as a non-persistent buffer: avoids re-creating them on
        # every forward call and keeps tracing happy (see issue #5664).
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Embed `input_ids` (or use the pre-computed `input_embeds`) and add position embeddings.

        Parameters:
            input_ids (torch.Tensor):
                torch.tensor(bs, max_seq_length) token ids to embed, or `None` when
                `input_embeds` is supplied.
            input_embeds (*optional*, torch.Tensor):
                Pre-computed word embeddings. Can only be passed if the input ids are `None`.

        Returns: torch.tensor(bs, max_seq_length, dim) the embedded tokens plus position
        embeddings (no token-type embeddings).
        """
        if input_ids is not None:
            input_embeds = self.word_embeddings(input_ids)  # (bs, max_seq_length, dim)

        seq_length = input_embeds.size(1)

        # Prefer the registered buffer when present; the fallback recomputes the ids, which
        # only happens on instances that pre-date the buffer (e.g. old serialized modules).
        if hasattr(self, "position_ids"):
            position_ids = self.position_ids[:, :seq_length]
        else:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)  # (max_seq_length)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)  # (bs, max_seq_length)

        summed = input_embeds + self.position_embeddings(position_ids)  # (bs, max_seq_length, dim)
        return self.dropout(self.LayerNorm(summed))  # (bs, max_seq_length, dim)
160
+
161
+
162
class MultiHeadSelfAttention(nn.Module):
    """Classic scaled-dot-product multi-head self-attention with head pruning support."""

    def __init__(self, config: "PretrainedConfig"):
        super().__init__()
        self.config = config

        self.n_heads = config.n_heads
        self.dim = config.dim
        self.dropout = nn.Dropout(p=config.attention_dropout)
        # Bidirectional (encoder) attention; subclasses rely on this flag.
        self.is_causal = False

        # The model dimension must split evenly across heads.
        if self.dim % self.n_heads != 0:
            raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")

        self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
        self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)

        self.pruned_heads: Set[int] = set()
        self.attention_head_size = self.dim // self.n_heads

    def prune_heads(self, heads: List[int]):
        """Remove the given attention heads and shrink all four projections accordingly."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_heads, self.attention_head_size, self.pruned_heads
        )
        # q/k/v are pruned along their output dimension, out_lin along its input dimension.
        for name in ("q_lin", "k_lin", "v_lin"):
            setattr(self, name, prune_linear_layer(getattr(self, name), index))
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update bookkeeping to reflect the smaller attention width.
        self.n_heads = self.n_heads - len(heads)
        self.dim = self.attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            query: torch.tensor(bs, seq_length, dim)
            key: torch.tensor(bs, seq_length, dim)
            value: torch.tensor(bs, seq_length, dim)
            mask: torch.tensor(bs, seq_length) padding mask, 0 marks positions to ignore

        Returns:
            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
        """
        bs, q_length, dim = query.size()
        k_length = key.size(1)
        head_dim = self.dim // self.n_heads

        def split_heads(x: torch.Tensor) -> torch.Tensor:
            # (bs, length, dim) -> (bs, n_heads, length, head_dim)
            return x.view(bs, -1, self.n_heads, head_dim).transpose(1, 2)

        # Scale queries once instead of scaling every score.
        q = split_heads(self.q_lin(query)) / math.sqrt(head_dim)  # (bs, n_heads, q_length, head_dim)
        k = split_heads(self.k_lin(key))  # (bs, n_heads, k_length, head_dim)
        v = split_heads(self.v_lin(value))  # (bs, n_heads, k_length, head_dim)

        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, q_length, k_length)
        # Broadcast the (bs, seq) padding mask over heads and query positions, then mask out
        # padded keys with the dtype's most negative value so softmax sends them to ~0.
        padding = (mask == 0).view(bs, 1, 1, k_length).expand_as(scores)
        scores = scores.masked_fill(padding, torch.tensor(torch.finfo(scores.dtype).min))

        weights = self.dropout(nn.functional.softmax(scores, dim=-1))  # (bs, n_heads, q_length, k_length)

        # Optionally zero out entire heads.
        if head_mask is not None:
            weights = weights * head_mask

        context = torch.matmul(weights, v)  # (bs, n_heads, q_length, head_dim)
        # Merge heads back: (bs, q_length, dim).
        context = context.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * head_dim)
        context = self.out_lin(context)

        return (context, weights) if output_attentions else (context,)
264
+
265
+
266
+ class DistilBertFlashAttention2(MultiHeadSelfAttention):
267
+ """
268
+ DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
269
+ stays untouched. The only required change would be on the forward pass where it needs to correctly call the public
270
+ API of flash attention and deal with padding tokens in case the input contains any of them.
271
+ """
272
+
273
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
274
+ def __init__(self, *args, **kwargs):
275
+ super().__init__(*args, **kwargs)
276
+
277
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
278
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
279
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
280
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
281
+
282
+ def forward(
283
+ self,
284
+ query: torch.Tensor,
285
+ key: torch.Tensor,
286
+ value: torch.Tensor,
287
+ mask: torch.Tensor,
288
+ head_mask: Optional[torch.Tensor] = None,
289
+ output_attentions: bool = False,
290
+ ) -> Tuple[torch.Tensor, ...]:
291
+ """
292
+ Parameters:
293
+ query: torch.tensor(bs, seq_length, dim)
294
+ key: torch.tensor(bs, seq_length, dim)
295
+ value: torch.tensor(bs, seq_length, dim)
296
+ mask: torch.tensor(bs, seq_length)
297
+
298
+ Returns:
299
+ weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
300
+ seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
301
+ """
302
+ batch_size, q_length, dim = query.size()
303
+
304
+ dim_per_head = self.dim // self.n_heads
305
+
306
+ def reshape(x: torch.Tensor) -> torch.Tensor:
307
+ """separate heads"""
308
+ return x.view(batch_size, -1, self.n_heads, dim_per_head)
309
+
310
+ # Flash attention requires the input to have the shape
311
+ # batch_size x seq_length x head_dim x hidden_dim
312
+ query_states = reshape(self.q_lin(query))
313
+ key_states = reshape(self.k_lin(key))
314
+ value_states = reshape(self.v_lin(value))
315
+
316
+ attn_dropout = self.config.attention_dropout if self.training else 0.0
317
+
318
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
319
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
320
+ # cast them back in the correct dtype just to be sure everything works as expected.
321
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
322
+ # in fp32. (LlamaRMSNorm handles it correctly)
323
+
324
+ if query_states.dtype == torch.float32:
325
+ if torch.is_autocast_enabled():
326
+ target_dtype = torch.get_autocast_gpu_dtype()
327
+ # Handle the case where the model is quantized
328
+ elif hasattr(self.config, "_pre_quantization_dtype"):
329
+ target_dtype = self.config._pre_quantization_dtype
330
+ else:
331
+ target_dtype = self.q_lin.weight.dtype
332
+
333
+ logger.warning_once(
334
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
335
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
336
+ f" {target_dtype}."
337
+ )
338
+
339
+ query_states = query_states.to(target_dtype)
340
+ key_states = key_states.to(target_dtype)
341
+ value_states = value_states.to(target_dtype)
342
+
343
+ attn_weights = self._flash_attention_forward(
344
+ query_states, key_states, value_states, mask, q_length, dropout=attn_dropout
345
+ )
346
+
347
+ attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
348
+ attn_output = self.out_lin(attn_weights_reshaped)
349
+
350
+ if output_attentions:
351
+ return (attn_output, attn_weights)
352
+ else:
353
+ return (attn_output,)
354
+
355
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with causal=True->causal=False
356
    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            query_length (`int`):
                Length of the query sequence; used to re-pad the attention output after the varlen kernel.
            dropout (`float`, *optional*):
                Attention dropout probability.
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        # `causal` selection: the top-left-mask variant needs the q_len != 1 workaround for older
        # RoCm flash-attn builds (see the comment in LlamaFlashAttention2.__init__).
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            # Strip padding so the varlen kernel only computes over real tokens.
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            # Scatter the unpadded rows back into a dense (batch, query_length, ...) tensor.
            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            # No padding anywhere: use the dense kernel directly.
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output
414
+
415
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->n_heads
416
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        """
        Remove padding rows from q/k/v so they can be fed to `flash_attn_varlen_func`.

        Returns the unpadded q/k/v plus the query indices and the (cu_seqlens, max_seqlen)
        pairs the varlen kernel needs to reconstruct per-sequence boundaries.
        """
        # indices_k: flat positions of non-padding tokens; cu_seqlens_k: cumulative lengths per batch row.
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            # Full self-attention: queries share the key layout, reuse the key indices.
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.n_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            # Single-token (decode-style) query: one query row per batch element.
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
453
+
454
+
455
class FFN(nn.Module):
    """Position-wise feed-forward sub-layer: dim -> hidden_dim -> activation -> dim -> dropout.

    Applied with sequence-dimension chunking to bound peak memory on long inputs.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        # Attribute names (lin1/lin2/dropout/activation) are state-dict keys; do not rename.
        self.dropout = nn.Dropout(p=config.dropout)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
        self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
        self.activation = get_activation(config.activation)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Delegate to apply_chunking_to_forward so very long sequences are processed in slices.
        return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)

    def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
        # One chunk of the FFN: expand, activate, project back, then dropout.
        expanded = self.activation(self.lin1(input))
        return self.dropout(self.lin2(expanded))
474
+
475
+
476
# Maps `config._attn_implementation` to the attention module class instantiated by TransformerBlock.
DISTILBERT_ATTENTION_CLASSES = {
    "eager": MultiHeadSelfAttention,
    "flash_attention_2": DistilBertFlashAttention2,
}
480
+
481
+
482
class TransformerBlock(nn.Module):
    """One DistilBERT encoder layer: self-attention + FFN, each with a residual and LayerNorm."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        # n_heads must divide dim so each head gets an equal slice of the hidden size.
        if config.dim % config.n_heads != 0:
            raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")

        self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
        self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

        self.ffn = FFN(config)
        self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, ...]:
        """
        Parameters:
            x: torch.tensor(bs, seq_length, dim) input hidden states.
            attn_mask: torch.tensor(bs, seq_length) padding mask for self-attention.

        Returns:
            A tuple of `(ffn_output,)`, or `(sa_weights, ffn_output)` when `output_attentions` is
            True, where `ffn_output` is torch.tensor(bs, seq_length, dim) and `sa_weights` is
            torch.tensor(bs, n_heads, seq_length, seq_length).
        """
        # Self-attention (query == key == value == x)
        attn_result = self.attention(
            query=x,
            key=x,
            value=x,
            mask=attn_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        sa_weights = None
        if output_attentions:
            hidden, sa_weights = attn_result  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:
            # Attention modules return a 1-tuple in this mode; anything else is a contract violation.
            if type(attn_result) != tuple:
                raise TypeError(f"sa_output must be a tuple but it is {type(attn_result)} type")
            hidden = attn_result[0]

        hidden = self.sa_layer_norm(hidden + x)  # residual + norm, (bs, seq_length, dim)

        # Feed-forward network with its own residual + norm.
        ffn_out = self.ffn(hidden)  # (bs, seq_length, dim)
        ffn_out = self.output_layer_norm(ffn_out + hidden)  # (bs, seq_length, dim)

        if output_attentions:
            return (sa_weights, ffn_out)
        return (ffn_out,)
538
+
539
+
540
class Transformer(nn.Module):
    """Stack of `config.n_layers` TransformerBlock modules with optional gradient checkpointing."""

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.n_layers = config.n_layers
        self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:  # docstyle-ignore
        """
        Parameters:
            x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
            attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.

        Returns:
            The final hidden states, optionally together with per-layer hidden states
            (`output_hidden_states=True`) and per-layer attention weights
            (`output_attentions=True`), either as a `BaseModelOutput` or a plain tuple.
        """
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_state = x
        for layer_idx, block in enumerate(self.layer):
            if output_hidden_states:
                # Record the state *entering* this layer (embeddings for layer 0).
                all_hidden_states = all_hidden_states + (hidden_state,)

            if self.gradient_checkpointing and self.training:
                # Recompute activations in backward to trade compute for memory.
                block_outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_state,
                    attn_mask,
                    head_mask[layer_idx],
                    output_attentions,
                )
            else:
                block_outputs = block(
                    hidden_state,
                    attn_mask,
                    head_mask[layer_idx],
                    output_attentions,
                )

            # The hidden state is always the last element of the block's tuple.
            hidden_state = block_outputs[-1]

            if output_attentions:
                if len(block_outputs) != 2:
                    raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(block_outputs)}")
                all_attentions = all_attentions + (block_outputs[0],)
            elif len(block_outputs) != 1:
                raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(block_outputs)}")

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
        )
615
+
616
+
617
+ # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
618
class DistilBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DistilBertConfig
    load_tf_weights = None
    base_model_prefix = "distilbert"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True

    def _init_weights(self, module: nn.Module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
        elif isinstance(module, nn.Embedding) and module.padding_idx is not None:
            # Padding embedding row stays at zero.
            module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
645
+
646
+
647
# Docstring fragment injected into each model class via @add_start_docstrings.
DISTILBERT_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
662
+
663
# Forward-pass docstring template; `{0}` is filled with the input shape string per model
# via @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format(...)).
DISTILBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
698
+
699
+
700
@add_start_docstrings(
    "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
    DISTILBERT_START_DOCSTRING,
)
class DistilBertModel(DistilBertPreTrainedModel):
    """Embeddings + Transformer encoder stack, without any task-specific head."""

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.embeddings = Embeddings(config)  # Embeddings
        self.transformer = Transformer(config)  # Encoder
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.embeddings.position_embeddings

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings

        # no resizing needs to be done if the length stays the same
        if num_position_embeds_diff == 0:
            return

        logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
        self.config.max_position_embeddings = new_num_position_embeddings

        old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()

        self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)

        if self.config.sinusoidal_pos_embds:
            # BUGFIX: the position embedding table lives on `self.embeddings`;
            # `self.position_embeddings` does not exist on DistilBertModel and raised AttributeError.
            create_sinusoidal_embeddings(
                n_pos=self.config.max_position_embeddings,
                dim=self.config.dim,
                out=self.embeddings.position_embeddings.weight,
            )
        else:
            with torch.no_grad():
                if num_position_embeds_diff > 0:
                    # Growing: keep the old learned vectors at the front; the tail stays freshly initialized.
                    self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
                        old_position_embeddings_weight
                    )
                else:
                    # Shrinking: truncate the old table.
                    self.embeddings.position_embeddings.weight = nn.Parameter(
                        old_position_embeddings_weight[:num_position_embeds_diff]
                    )
        # move position_embeddings to correct device
        self.embeddings.position_embeddings.to(self.device)

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings: nn.Embedding):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.transformer.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embeddings = self.embeddings(input_ids, inputs_embeds)  # (bs, seq_length, dim)

        if self._use_flash_attention_2:
            # Flash attention only needs the mask when there is actual padding (a 0 somewhere).
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            if attention_mask is None:
                attention_mask = torch.ones(input_shape, device=device)  # (bs, seq_length)

        return self.transformer(
            x=embeddings,
            attn_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
830
+
831
+
832
@add_start_docstrings(
    """DistilBert Model with a `masked language modeling` head on top.""",
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForMaskedLM(DistilBertPreTrainedModel):
    """DistilBERT encoder topped by the MLM head: dense -> activation -> LayerNorm -> vocab projection."""

    _tied_weights_keys = ["vocab_projector.weight"]

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)

        self.activation = get_activation(config.activation)

        self.distilbert = DistilBertModel(config)
        self.vocab_transform = nn.Linear(config.dim, config.dim)
        self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
        self.vocab_projector = nn.Linear(config.dim, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

        self.mlm_loss_fct = nn.CrossEntropyLoss()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.vocab_projector

    def set_output_embeddings(self, new_embeddings: nn.Module):
        self.vocab_projector = new_embeddings

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]  # (bs, seq_length, dim)
        # MLM head: dense -> activation -> LayerNorm -> projection to vocab logits.
        prediction_logits = self.vocab_projector(
            self.vocab_layer_norm(self.activation(self.vocab_transform(sequence_output)))
        )  # (bs, seq_length, vocab_size)

        mlm_loss = None
        if labels is not None:
            # CrossEntropyLoss ignores positions labelled -100 by default.
            mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (prediction_logits,) + encoder_outputs[1:]
            return output if mlm_loss is None else ((mlm_loss,) + output)

        return MaskedLMOutput(
            loss=mlm_loss,
            logits=prediction_logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
934
+
935
+
936
@add_start_docstrings(
    """
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
    """DistilBERT with a classification head on the [CLS] hidden state (pre_classifier -> ReLU -> dropout -> classifier)."""

    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.distilbert = DistilBertModel(config)
        self.pre_classifier = nn.Linear(config.dim, config.dim)
        self.classifier = nn.Linear(config.dim, config.num_labels)
        self.dropout = nn.Dropout(config.seq_classif_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings
        """
        return self.distilbert.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embedding matrix. If position embeddings are learned, increasing the size
                will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
                end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
                size will add correct vectors at the end following the position encoding algorithm, whereas reducing
                the size will remove vectors from the end.
        """
        self.distilbert.resize_position_embeddings(new_num_position_embeddings)

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]  # (bs, seq_len, dim)
        # Pool by taking the first ([CLS]) token, then apply the classification head.
        pooled = sequence_output[:, 0]  # (bs, dim)
        pooled = torch.relu(self.pre_classifier(pooled))  # (bs, dim)
        pooled = self.dropout(pooled)  # (bs, dim)
        logits = self.classifier(pooled)  # (bs, num_labels)

        loss = None
        if labels is not None:
            # Infer the problem type once (cached on the config) when not set explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            problem_type = self.config.problem_type
            if problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            elif problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(logits, labels)

        if not return_dict:
            output = (logits,) + encoder_outputs[1:]
            return output if loss is None else ((loss,) + output)

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
1051
+
1052
+
1053
+ @add_start_docstrings(
1054
+ """
1055
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1056
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1057
+ """,
1058
+ DISTILBERT_START_DOCSTRING,
1059
+ )
1060
+ class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
1061
+ def __init__(self, config: PretrainedConfig):
1062
+ super().__init__(config)
1063
+
1064
+ self.distilbert = DistilBertModel(config)
1065
+ self.qa_outputs = nn.Linear(config.dim, config.num_labels)
1066
+ if config.num_labels != 2:
1067
+ raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")
1068
+
1069
+ self.dropout = nn.Dropout(config.qa_dropout)
1070
+
1071
+ # Initialize weights and apply final processing
1072
+ self.post_init()
1073
+
1074
+ def get_position_embeddings(self) -> nn.Embedding:
1075
+ """
1076
+ Returns the position embeddings
1077
+ """
1078
+ return self.distilbert.get_position_embeddings()
1079
+
1080
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1081
+ """
1082
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1083
+
1084
+ Arguments:
1085
+ new_num_position_embeddings (`int`):
1086
+ The number of new position embedding matrix. If position embeddings are learned, increasing the size
1087
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
1088
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
1089
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
1090
+ the size will remove vectors from the end.
1091
+ """
1092
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1093
+
1094
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
1095
+ @add_code_sample_docstrings(
1096
+ checkpoint=_CHECKPOINT_FOR_DOC,
1097
+ output_type=QuestionAnsweringModelOutput,
1098
+ config_class=_CONFIG_FOR_DOC,
1099
+ )
1100
+ def forward(
1101
+ self,
1102
+ input_ids: Optional[torch.Tensor] = None,
1103
+ attention_mask: Optional[torch.Tensor] = None,
1104
+ head_mask: Optional[torch.Tensor] = None,
1105
+ inputs_embeds: Optional[torch.Tensor] = None,
1106
+ start_positions: Optional[torch.Tensor] = None,
1107
+ end_positions: Optional[torch.Tensor] = None,
1108
+ output_attentions: Optional[bool] = None,
1109
+ output_hidden_states: Optional[bool] = None,
1110
+ return_dict: Optional[bool] = None,
1111
+ ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
1112
+ r"""
1113
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1114
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1115
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1116
+ are not taken into account for computing the loss.
1117
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1118
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1119
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1120
+ are not taken into account for computing the loss.
1121
+ """
1122
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1123
+
1124
+ distilbert_output = self.distilbert(
1125
+ input_ids=input_ids,
1126
+ attention_mask=attention_mask,
1127
+ head_mask=head_mask,
1128
+ inputs_embeds=inputs_embeds,
1129
+ output_attentions=output_attentions,
1130
+ output_hidden_states=output_hidden_states,
1131
+ return_dict=return_dict,
1132
+ )
1133
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
1134
+
1135
+ hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
1136
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
1137
+ start_logits, end_logits = logits.split(1, dim=-1)
1138
+ start_logits = start_logits.squeeze(-1).contiguous() # (bs, max_query_len)
1139
+ end_logits = end_logits.squeeze(-1).contiguous() # (bs, max_query_len)
1140
+
1141
+ total_loss = None
1142
+ if start_positions is not None and end_positions is not None:
1143
+ # If we are on multi-GPU, split add a dimension
1144
+ if len(start_positions.size()) > 1:
1145
+ start_positions = start_positions.squeeze(-1)
1146
+ if len(end_positions.size()) > 1:
1147
+ end_positions = end_positions.squeeze(-1)
1148
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1149
+ ignored_index = start_logits.size(1)
1150
+ start_positions = start_positions.clamp(0, ignored_index)
1151
+ end_positions = end_positions.clamp(0, ignored_index)
1152
+
1153
+ loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
1154
+ start_loss = loss_fct(start_logits, start_positions)
1155
+ end_loss = loss_fct(end_logits, end_positions)
1156
+ total_loss = (start_loss + end_loss) / 2
1157
+
1158
+ if not return_dict:
1159
+ output = (start_logits, end_logits) + distilbert_output[1:]
1160
+ return ((total_loss,) + output) if total_loss is not None else output
1161
+
1162
+ return QuestionAnsweringModelOutput(
1163
+ loss=total_loss,
1164
+ start_logits=start_logits,
1165
+ end_logits=end_logits,
1166
+ hidden_states=distilbert_output.hidden_states,
1167
+ attentions=distilbert_output.attentions,
1168
+ )
1169
+
1170
+
1171
+ @add_start_docstrings(
1172
+ """
1173
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1174
+ for Named-Entity-Recognition (NER) tasks.
1175
+ """,
1176
+ DISTILBERT_START_DOCSTRING,
1177
+ )
1178
+ class DistilBertForTokenClassification(DistilBertPreTrainedModel):
1179
+ def __init__(self, config: PretrainedConfig):
1180
+ super().__init__(config)
1181
+ self.num_labels = config.num_labels
1182
+
1183
+ self.distilbert = DistilBertModel(config)
1184
+ self.dropout = nn.Dropout(config.dropout)
1185
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1186
+
1187
+ # Initialize weights and apply final processing
1188
+ self.post_init()
1189
+
1190
+ def get_position_embeddings(self) -> nn.Embedding:
1191
+ """
1192
+ Returns the position embeddings
1193
+ """
1194
+ return self.distilbert.get_position_embeddings()
1195
+
1196
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1197
+ """
1198
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1199
+
1200
+ Arguments:
1201
+ new_num_position_embeddings (`int`):
1202
+ The number of new position embedding matrix. If position embeddings are learned, increasing the size
1203
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
1204
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
1205
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
1206
+ the size will remove vectors from the end.
1207
+ """
1208
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1209
+
1210
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING)
1211
+ @add_code_sample_docstrings(
1212
+ checkpoint=_CHECKPOINT_FOR_DOC,
1213
+ output_type=TokenClassifierOutput,
1214
+ config_class=_CONFIG_FOR_DOC,
1215
+ )
1216
+ def forward(
1217
+ self,
1218
+ input_ids: Optional[torch.Tensor] = None,
1219
+ attention_mask: Optional[torch.Tensor] = None,
1220
+ head_mask: Optional[torch.Tensor] = None,
1221
+ inputs_embeds: Optional[torch.Tensor] = None,
1222
+ labels: Optional[torch.LongTensor] = None,
1223
+ output_attentions: Optional[bool] = None,
1224
+ output_hidden_states: Optional[bool] = None,
1225
+ return_dict: Optional[bool] = None,
1226
+ ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
1227
+ r"""
1228
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1229
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1230
+ """
1231
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1232
+
1233
+ outputs = self.distilbert(
1234
+ input_ids,
1235
+ attention_mask=attention_mask,
1236
+ head_mask=head_mask,
1237
+ inputs_embeds=inputs_embeds,
1238
+ output_attentions=output_attentions,
1239
+ output_hidden_states=output_hidden_states,
1240
+ return_dict=return_dict,
1241
+ )
1242
+
1243
+ sequence_output = outputs[0]
1244
+
1245
+ sequence_output = self.dropout(sequence_output)
1246
+ logits = self.classifier(sequence_output)
1247
+
1248
+ loss = None
1249
+ if labels is not None:
1250
+ loss_fct = CrossEntropyLoss()
1251
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1252
+
1253
+ if not return_dict:
1254
+ output = (logits,) + outputs[1:]
1255
+ return ((loss,) + output) if loss is not None else output
1256
+
1257
+ return TokenClassifierOutput(
1258
+ loss=loss,
1259
+ logits=logits,
1260
+ hidden_states=outputs.hidden_states,
1261
+ attentions=outputs.attentions,
1262
+ )
1263
+
1264
+
1265
+ @add_start_docstrings(
1266
+ """
1267
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
1268
+ a softmax) e.g. for RocStories/SWAG tasks.
1269
+ """,
1270
+ DISTILBERT_START_DOCSTRING,
1271
+ )
1272
+ class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
1273
+ def __init__(self, config: PretrainedConfig):
1274
+ super().__init__(config)
1275
+
1276
+ self.distilbert = DistilBertModel(config)
1277
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
1278
+ self.classifier = nn.Linear(config.dim, 1)
1279
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
1280
+
1281
+ # Initialize weights and apply final processing
1282
+ self.post_init()
1283
+
1284
+ def get_position_embeddings(self) -> nn.Embedding:
1285
+ """
1286
+ Returns the position embeddings
1287
+ """
1288
+ return self.distilbert.get_position_embeddings()
1289
+
1290
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1291
+ """
1292
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1293
+
1294
+ Arguments:
1295
+ new_num_position_embeddings (`int`)
1296
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
1297
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
1298
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
1299
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
1300
+ will remove vectors from the end.
1301
+ """
1302
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1303
+
1304
+ @add_start_docstrings_to_model_forward(
1305
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1306
+ )
1307
+ @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
1308
+ def forward(
1309
+ self,
1310
+ input_ids: Optional[torch.Tensor] = None,
1311
+ attention_mask: Optional[torch.Tensor] = None,
1312
+ head_mask: Optional[torch.Tensor] = None,
1313
+ inputs_embeds: Optional[torch.Tensor] = None,
1314
+ labels: Optional[torch.LongTensor] = None,
1315
+ output_attentions: Optional[bool] = None,
1316
+ output_hidden_states: Optional[bool] = None,
1317
+ return_dict: Optional[bool] = None,
1318
+ ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
1319
+ r"""
1320
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1321
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1322
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1323
+ `input_ids` above)
1324
+
1325
+ Returns:
1326
+
1327
+ Examples:
1328
+
1329
+ ```python
1330
+ >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
1331
+ >>> import torch
1332
+
1333
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
1334
+ >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
1335
+
1336
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1337
+ >>> choice0 = "It is eaten with a fork and a knife."
1338
+ >>> choice1 = "It is eaten while held in the hand."
1339
+ >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
1340
+
1341
+ >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
1342
+ >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
1343
+
1344
+ >>> # the linear classifier still needs to be trained
1345
+ >>> loss = outputs.loss
1346
+ >>> logits = outputs.logits
1347
+ ```"""
1348
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1349
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1350
+
1351
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1352
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1353
+ inputs_embeds = (
1354
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1355
+ if inputs_embeds is not None
1356
+ else None
1357
+ )
1358
+
1359
+ outputs = self.distilbert(
1360
+ input_ids,
1361
+ attention_mask=attention_mask,
1362
+ head_mask=head_mask,
1363
+ inputs_embeds=inputs_embeds,
1364
+ output_attentions=output_attentions,
1365
+ output_hidden_states=output_hidden_states,
1366
+ return_dict=return_dict,
1367
+ )
1368
+
1369
+ hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
1370
+ pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
1371
+ pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
1372
+ pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
1373
+ pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
1374
+ logits = self.classifier(pooled_output) # (bs * num_choices, 1)
1375
+
1376
+ reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
1377
+
1378
+ loss = None
1379
+ if labels is not None:
1380
+ loss_fct = CrossEntropyLoss()
1381
+ loss = loss_fct(reshaped_logits, labels)
1382
+
1383
+ if not return_dict:
1384
+ output = (reshaped_logits,) + outputs[1:]
1385
+ return ((loss,) + output) if loss is not None else output
1386
+
1387
+ return MultipleChoiceModelOutput(
1388
+ loss=loss,
1389
+ logits=reshaped_logits,
1390
+ hidden_states=outputs.hidden_states,
1391
+ attentions=outputs.attentions,
1392
+ )
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py ADDED
@@ -0,0 +1,895 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import math
17
+ from typing import Callable, Optional, Tuple
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.traverse_util import flatten_dict, unflatten_dict
25
+ from jax import lax
26
+
27
+ from ...modeling_flax_outputs import (
28
+ FlaxBaseModelOutput,
29
+ FlaxMaskedLMOutput,
30
+ FlaxMultipleChoiceModelOutput,
31
+ FlaxQuestionAnsweringModelOutput,
32
+ FlaxSequenceClassifierOutput,
33
+ FlaxTokenClassifierOutput,
34
+ )
35
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
37
+ from .configuration_distilbert import DistilBertConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
43
+ _CONFIG_FOR_DOC = "DistilBertConfig"
44
+
45
+
46
+ FLAX_DISTILBERT_START_DOCSTRING = r"""
47
+
48
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
49
+ library implements for all its model (such as downloading, saving and converting weights from PyTorch models)
50
+
51
+ This model is also a
52
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
53
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
54
+ behavior.
55
+
56
+ Finally, this model supports inherent JAX features such as:
57
+
58
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
59
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
60
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
61
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
62
+
63
+ Parameters:
64
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
65
+ Initializing with a config file does not load the weights associated with the model, only the
66
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
67
+ """
68
+
69
+ DISTILBERT_INPUTS_DOCSTRING = r"""
70
+ Args:
71
+ input_ids (`numpy.ndarray` of shape `({0})`):
72
+ Indices of input sequence tokens in the vocabulary.
73
+
74
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
75
+ [`PreTrainedTokenizer.__call__`] for details.
76
+
77
+ [What are input IDs?](../glossary#input-ids)
78
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
79
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
80
+
81
+ - 1 for tokens that are **not masked**,
82
+ - 0 for tokens that are **masked**.
83
+
84
+ [What are attention masks?](../glossary#attention-mask)
85
+ output_attentions (`bool`, *optional*):
86
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
87
+ tensors for more detail.
88
+ output_hidden_states (`bool`, *optional*):
89
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
90
+ more detail.
91
+ return_dict (`bool`, *optional*):
92
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
93
+ """
94
+
95
+
96
+ def get_angles(pos, i, d_model):
97
+ angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
98
+ return pos * angle_rates
99
+
100
+
101
+ def positional_encoding(position, d_model):
102
+ # create the sinusoidal pattern for the positional encoding
103
+ angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
104
+
105
+ # apply sin to even indices in the array; 2i
106
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
107
+
108
+ # apply cos to odd indices in the array; 2i+1
109
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
110
+
111
+ pos_encoding = angle_rads[np.newaxis, ...]
112
+
113
+ return jnp.array(pos_encoding)
114
+
115
+
116
+ class FlaxEmbeddings(nn.Module):
117
+ """Construct the embeddings from word, position and token_type embeddings."""
118
+
119
+ config: DistilBertConfig
120
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
121
+
122
+ def setup(self):
123
+ self.word_embeddings = nn.Embed(
124
+ self.config.vocab_size,
125
+ self.config.dim,
126
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
127
+ )
128
+ if not self.config.sinusoidal_pos_embds:
129
+ self.position_embeddings = nn.Embed(
130
+ self.config.max_position_embeddings,
131
+ self.config.dim,
132
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
133
+ )
134
+ else:
135
+ self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim)
136
+ self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
137
+ self.dropout = nn.Dropout(rate=self.config.dropout)
138
+
139
+ def __call__(self, input_ids, deterministic: bool = True):
140
+ # Embed
141
+ batch_size, seq_length = input_ids.shape
142
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
143
+ if not self.config.sinusoidal_pos_embds:
144
+ position_ids = jnp.arange(seq_length).astype("i4")
145
+ position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length))
146
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
147
+ else:
148
+ position_embeds = self.pos_encoding[:, :seq_length, :]
149
+ # explictly cast the positions here, since self.embed_positions are not registered as parameters
150
+ position_embeds = position_embeds.astype(inputs_embeds.dtype)
151
+
152
+ # Sum all embeddings
153
+ hidden_states = inputs_embeds + position_embeds
154
+
155
+ # Layer Norm
156
+ hidden_states = self.LayerNorm(hidden_states)
157
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
158
+ return hidden_states
159
+
160
+
161
+ class FlaxMultiHeadSelfAttention(nn.Module):
162
+ config: DistilBertConfig
163
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
164
+
165
+ def setup(self):
166
+ self.n_heads = self.config.n_heads
167
+ self.dim = self.config.dim
168
+ self.dropout = nn.Dropout(rate=self.config.attention_dropout)
169
+
170
+ if not (self.dim % self.n_heads == 0):
171
+ raise ValueError(f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}")
172
+
173
+ self.q_lin = nn.Dense(
174
+ self.dim,
175
+ dtype=self.dtype,
176
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
177
+ )
178
+ self.k_lin = nn.Dense(
179
+ self.dim,
180
+ dtype=self.dtype,
181
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
182
+ )
183
+ self.v_lin = nn.Dense(
184
+ self.dim,
185
+ dtype=self.dtype,
186
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
187
+ )
188
+ self.out_lin = nn.Dense(
189
+ self.dim,
190
+ dtype=self.dtype,
191
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
192
+ )
193
+
194
+ def __call__(
195
+ self,
196
+ query,
197
+ key,
198
+ value,
199
+ mask,
200
+ deterministic: bool = True,
201
+ output_attentions: bool = False,
202
+ ):
203
+ bs, q_len, dim = query.shape
204
+ k_len = key.shape[1]
205
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
206
+ # assert key.size() == value.size()
207
+
208
+ dim_per_head = self.dim // self.n_heads
209
+
210
+ mask_reshp = (bs, 1, 1, k_len)
211
+
212
+ def shape(x):
213
+ """separate heads"""
214
+ return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3)
215
+
216
+ def unshape(x):
217
+ """group heads"""
218
+ return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head)
219
+
220
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_len, dim_per_head)
221
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_len, dim_per_head)
222
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_len, dim_per_head)
223
+
224
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_len, dim_per_head)
225
+ scores = jnp.matmul(q, k.transpose(0, 1, 3, 2)) # (bs, n_heads, q_len, k_len)
226
+ mask = jnp.reshape(mask, mask_reshp)
227
+
228
+ mask = mask.astype(scores.dtype)
229
+ scores = scores - 1e30 * (1.0 - mask)
230
+
231
+ weights = nn.softmax(scores, axis=-1) # (bs, n_heads, q_len, k_len)
232
+ weights = self.dropout(weights, deterministic=deterministic)
233
+
234
+ context = jnp.matmul(weights, v) # (bs, n_heads, q_len, dim_per_head)
235
+ context = unshape(context) # (bs, q_len, dim)
236
+ context = self.out_lin(context) # (bs, q_len, dim)
237
+
238
+ if output_attentions:
239
+ return (context, weights)
240
+ else:
241
+ return (context,)
242
+
243
+
244
+ class FlaxFFN(nn.Module):
245
+ config: DistilBertConfig
246
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
247
+
248
+ def setup(self):
249
+ self.dropout = nn.Dropout(rate=self.config.dropout)
250
+ self.chunk_size_feed_forward = self.config.chunk_size_feed_forward
251
+ self.seq_len_dim = 1
252
+ self.lin1 = nn.Dense(
253
+ self.config.hidden_dim,
254
+ dtype=self.dtype,
255
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
256
+ )
257
+ self.lin2 = nn.Dense(
258
+ self.config.dim,
259
+ dtype=self.dtype,
260
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
261
+ )
262
+
263
+ self.activation = ACT2FN[self.config.activation]
264
+
265
+ def __call__(self, hidden_states, deterministic: bool = True):
266
+ hidden_states = self.lin1(hidden_states)
267
+ hidden_states = self.activation(hidden_states)
268
+ hidden_states = self.lin2(hidden_states)
269
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
270
+ return hidden_states
271
+
272
+
273
+ class FlaxTransformerBlock(nn.Module):
274
+ config: DistilBertConfig
275
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
276
+
277
+ def setup(self):
278
+ assert (
279
+ self.config.dim % self.config.n_heads == 0
280
+ ), f"Hidden size {self.config.dim} not dividable by number of heads {self.config.n_heads}"
281
+
282
+ self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype)
283
+ self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
284
+
285
+ self.ffn = FlaxFFN(self.config, dtype=self.dtype)
286
+ self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
287
+
288
+ def __call__(
289
+ self,
290
+ hidden_states,
291
+ attn_mask,
292
+ output_attentions: bool = False,
293
+ deterministic: bool = True,
294
+ ):
295
+ # Self-Attention
296
+ sa_output = self.attention(
297
+ query=hidden_states,
298
+ key=hidden_states,
299
+ value=hidden_states,
300
+ mask=attn_mask,
301
+ output_attentions=output_attentions,
302
+ deterministic=deterministic,
303
+ )
304
+ if output_attentions:
305
+ sa_output, sa_weights = sa_output
306
+ else:
307
+ assert type(sa_output) == tuple
308
+ sa_output = sa_output[0]
309
+ sa_output = self.sa_layer_norm(sa_output + hidden_states)
310
+
311
+ # Feed Forward Network
312
+ ffn_output = self.ffn(sa_output, deterministic=deterministic)
313
+ ffn_output = self.output_layer_norm(ffn_output + sa_output)
314
+ output = (ffn_output,)
315
+ if output_attentions:
316
+ output = (sa_weights,) + output
317
+ return output
318
+
319
+
320
+ class FlaxTransformer(nn.Module):
321
+ config: DistilBertConfig
322
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
323
+
324
+ def setup(self):
325
+ self.layers = [
326
+ FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)
327
+ ]
328
+
329
+ def __call__(
330
+ self,
331
+ hidden_states,
332
+ attention_mask,
333
+ output_attentions: bool = False,
334
+ output_hidden_states: bool = False,
335
+ deterministic: bool = True,
336
+ return_dict: bool = False,
337
+ ):
338
+ all_hidden_states = () if output_hidden_states else None
339
+ all_attentions = () if output_attentions else None
340
+
341
+ for layer_module in self.layers:
342
+ if output_hidden_states:
343
+ all_hidden_states = all_hidden_states + (hidden_states,)
344
+
345
+ layer_outputs = layer_module(
346
+ hidden_states=hidden_states,
347
+ attn_mask=attention_mask,
348
+ output_attentions=output_attentions,
349
+ deterministic=deterministic,
350
+ )
351
+ hidden_states = layer_outputs[-1]
352
+
353
+ if output_attentions:
354
+ assert len(layer_outputs) == 2
355
+ attentions = layer_outputs[0]
356
+ all_attentions = all_attentions + (attentions,)
357
+ else:
358
+ assert len(layer_outputs) == 1
359
+
360
+ # Add last layer
361
+ if output_hidden_states:
362
+ all_hidden_states = all_hidden_states + (hidden_states,)
363
+
364
+ if not return_dict:
365
+ return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)
366
+ return FlaxBaseModelOutput(
367
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
368
+ )
369
+
370
+
371
+ class FlaxTransformerEncoder(nn.Module):
372
+ config: DistilBertConfig
373
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
374
+
375
+ def setup(self):
376
+ self.layer = FlaxTransformer(self.config, dtype=self.dtype)
377
+
378
+ def __call__(
379
+ self,
380
+ hidden_states,
381
+ attention_mask,
382
+ output_attentions: bool = False,
383
+ output_hidden_states: bool = False,
384
+ deterministic: bool = True,
385
+ return_dict: bool = False,
386
+ ):
387
+ return self.layer(
388
+ hidden_states=hidden_states,
389
+ attention_mask=attention_mask,
390
+ output_attentions=output_attentions,
391
+ output_hidden_states=output_hidden_states,
392
+ deterministic=deterministic,
393
+ return_dict=return_dict,
394
+ )
395
+
396
+
397
+ class FlaxDistilBertLMDecoder(nn.Module):
398
+ config: DistilBertConfig
399
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
400
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
401
+
402
+ def setup(self):
403
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
404
+
405
+ def __call__(self, inputs, kernel):
406
+ inputs = jnp.asarray(inputs, self.dtype)
407
+ kernel = jnp.asarray(kernel, self.dtype)
408
+ y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
409
+ bias = jnp.asarray(self.bias, self.dtype)
410
+ y = y + bias
411
+ return y
412
+
413
+
414
class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DistilBertConfig
    base_model_prefix = "distilbert"
    # Concrete subclasses set this to the flax module implementing the computation.
    module_class: nn.Module = None

    def __init__(
        self,
        config: DistilBertConfig,
        input_shape: Tuple = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        # Instantiate the subclass-specific flax module and hand it to the base class,
        # which owns parameter storage and (optionally) initialization.
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialize random parameters; when `params` is given, only fill in its missing keys."""
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        attention_mask = jnp.ones_like(input_ids)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]

        if params is not None:
            # Merge: keep provided params, take only `self._missing_keys` from the fresh init.
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        input_ids,
        attention_mask=None,
        head_mask=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # Resolve output flags from the config when the caller leaves them unset.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        # `not train` maps to the module's `deterministic` positional argument
        # (dropout disabled at inference). NOTE: `head_mask` is accepted but not
        # forwarded to the module here.
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            not train,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )
493
+
494
+
495
class FlaxDistilBertModule(nn.Module):
    """Bare DistilBERT encoder: token/position embeddings followed by the transformer stack."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype)
        self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Fall back to the config defaults for any flag the caller left as None.
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.return_dict

        embedding_output = self.embeddings(input_ids, deterministic=deterministic)
        return self.transformer(
            hidden_states=embedding_output,
            attention_mask=attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
527
+
528
+
529
@add_start_docstrings(
    "The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.",
    FLAX_DISTILBERT_START_DOCSTRING,
)
class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel):
    # Thin public wrapper: all computation lives in the module class.
    module_class = FlaxDistilBertModule


# Attach a usage example to FlaxDistilBertModel.__call__ (no output class registered here).
append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC)
538
+
539
+
540
class FlaxDistilBertForMaskedLMModule(nn.Module):
    """DistilBERT encoder with the masked-language-modeling head on top."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype)
        # Projects hidden states back to `dim` before the vocabulary projection.
        self.vocab_transform = nn.Dense(
            self.config.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
        if self.config.tie_word_embeddings:
            # Decoder that reuses the (transposed) input word-embedding matrix.
            self.vocab_projector = FlaxDistilBertLMDecoder(
                self.config,
                dtype=self.dtype,
            )
        else:
            # Untied case: an independent dense projection to vocab_size.
            self.vocab_projector = nn.Dense(
                self.config.vocab_size,
                dtype=self.dtype,
                kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
            )

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        dlbrt_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            deterministic=deterministic,
            return_dict=return_dict,
        )
        hidden_states = dlbrt_output[0]
        prediction_logits = self.vocab_transform(hidden_states)
        prediction_logits = ACT2FN[self.config.activation](prediction_logits)
        prediction_logits = self.vocab_layer_norm(prediction_logits)

        if self.config.tie_word_embeddings:
            # Fetch the word-embedding table from the submodule's variable tree and
            # pass its transpose to the decoder (weight tying).
            shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
            prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T)
        else:
            prediction_logits = self.vocab_projector(prediction_logits)

        if not return_dict:
            output = (prediction_logits,) + dlbrt_output[1:]
            return output

        return FlaxMaskedLMOutput(
            logits=prediction_logits,
            hidden_states=dlbrt_output.hidden_states,
            attentions=dlbrt_output.attentions,
        )
603
+
604
+
605
@add_start_docstrings("""DistilBert Model with a `language modeling` head on top.""", FLAX_DISTILBERT_START_DOCSTRING)
class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel):
    # Thin public wrapper: all computation lives in the module class.
    module_class = FlaxDistilBertForMaskedLMModule


append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
611
+
612
+
613
class FlaxDistilBertForSequenceClassificationModule(nn.Module):
    """DistilBERT with a sequence-classification head on the first-token hidden state."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
        self.pre_classifier = nn.Dense(
            self.config.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
        self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # Run the bare encoder.
        outputs = self.distilbert(
            input_ids,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Classify from the hidden state of the first token position.
        cls_state = outputs[0][:, 0]  # (bs, dim)
        cls_state = self.pre_classifier(cls_state)  # (bs, dim)
        cls_state = ACT2FN["relu"](cls_state)
        cls_state = self.dropout(cls_state, deterministic=deterministic)
        logits = self.classifier(cls_state)  # (bs, num_labels)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxSequenceClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
664
+
665
+
666
@add_start_docstrings(
    """
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    FLAX_DISTILBERT_START_DOCSTRING,
)
class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel):
    # Thin public wrapper: all computation lives in the module class.
    module_class = FlaxDistilBertForSequenceClassificationModule


append_call_sample_docstring(
    FlaxDistilBertForSequenceClassification,
    _CHECKPOINT_FOR_DOC,
    FlaxSequenceClassifierOutput,
    _CONFIG_FOR_DOC,
)
683
+
684
+
685
class FlaxDistilBertForMultipleChoiceModule(nn.Module):
    """DistilBERT with a multiple-choice head: one scalar score per candidate answer."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
        self.pre_classifier = nn.Dense(
            self.config.dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
        )
        self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
        self.classifier = nn.Dense(1, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # Flatten (bs, num_choices, seq_len) -> (bs * num_choices, seq_len) so the
        # encoder treats every choice as an independent sequence.
        num_choices = input_ids.shape[1]
        if input_ids is not None:
            input_ids = input_ids.reshape(-1, input_ids.shape[-1])
        if attention_mask is not None:
            attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1])

        # Run the bare encoder.
        outputs = self.distilbert(
            input_ids,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled = outputs[0][:, 0]
        pooled = self.pre_classifier(pooled)
        pooled = ACT2FN["relu"](pooled)
        pooled = self.dropout(pooled, deterministic=deterministic)
        logits = self.classifier(pooled)

        # Fold the flattened batch back into one score per choice.
        reshaped_logits = logits.reshape(-1, num_choices)

        if not return_dict:
            return (reshaped_logits,) + outputs[2:]

        return FlaxMultipleChoiceModelOutput(
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
743
+
744
+
745
@add_start_docstrings(
    """
    DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    FLAX_DISTILBERT_START_DOCSTRING,
)
class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel):
    # Thin public wrapper: all computation lives in the module class.
    module_class = FlaxDistilBertForMultipleChoiceModule


# Multiple-choice inputs carry an extra `num_choices` axis, so override the
# generic input docstring before attaching the usage example.
overwrite_call_docstring(
    FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
append_call_sample_docstring(
    FlaxDistilBertForMultipleChoice,
    _CHECKPOINT_FOR_DOC,
    FlaxMultipleChoiceModelOutput,
    _CONFIG_FOR_DOC,
)
765
+
766
+
767
class FlaxDistilBertForTokenClassificationModule(nn.Module):
    """DistilBERT with a per-token classification head (e.g. for NER)."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.dropout)
        self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        if return_dict is None:
            return_dict = self.config.use_return_dict

        # Run the bare encoder.
        outputs = self.distilbert(
            input_ids,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Dropout followed by a linear projection applied at every token position.
        sequence_output = self.dropout(outputs[0], deterministic=deterministic)
        logits = self.classifier(sequence_output)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxTokenClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
808
+
809
+
810
@add_start_docstrings(
    """
    DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    FLAX_DISTILBERT_START_DOCSTRING,
)
class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel):
    # Thin public wrapper: all computation lives in the module class.
    module_class = FlaxDistilBertForTokenClassificationModule


append_call_sample_docstring(
    FlaxDistilBertForTokenClassification,
    _CHECKPOINT_FOR_DOC,
    FlaxTokenClassifierOutput,
    _CONFIG_FOR_DOC,
)
827
+
828
+
829
class FlaxDistilBertForQuestionAnsweringModule(nn.Module):
    """DistilBERT with an extractive-QA head producing span start/end logits."""

    config: DistilBertConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
        self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
        # The QA head requires exactly two labels: span start and span end.
        assert self.config.num_labels == 2
        self.dropout = nn.Dropout(rate=self.config.qa_dropout)

    def __call__(
        self,
        input_ids,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Model
        distilbert_output = self.distilbert(
            input_ids,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = distilbert_output[0]

        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        logits = self.qa_outputs(hidden_states)
        # Split the 2-channel output into start and end logits and drop the
        # trailing singleton axis: each becomes (bs, seq_len).
        start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if not return_dict:
            return (start_logits, end_logits) + distilbert_output[1:]

        return FlaxQuestionAnsweringModelOutput(
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )
877
+
878
+
879
@add_start_docstrings(
    """
    DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    FLAX_DISTILBERT_START_DOCSTRING,
)
class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
    # Thin public wrapper: all computation lives in the module class.
    module_class = FlaxDistilBertForQuestionAnsweringModule


append_call_sample_docstring(
    FlaxDistilBertForQuestionAnswering,
    _CHECKPOINT_FOR_DOC,
    FlaxQuestionAnsweringModelOutput,
    _CONFIG_FOR_DOC,
)
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py ADDED
@@ -0,0 +1,1145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ TF 2.0 DistilBERT model
17
+ """
18
+
19
+
20
+ from __future__ import annotations
21
+
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFMaskedLMOutput,
32
+ TFMultipleChoiceModelOutput,
33
+ TFQuestionAnsweringModelOutput,
34
+ TFSequenceClassifierOutput,
35
+ TFTokenClassifierOutput,
36
+ )
37
+ from ...modeling_tf_utils import (
38
+ TFMaskedLanguageModelingLoss,
39
+ TFModelInputType,
40
+ TFMultipleChoiceLoss,
41
+ TFPreTrainedModel,
42
+ TFQuestionAnsweringLoss,
43
+ TFSequenceClassificationLoss,
44
+ TFTokenClassificationLoss,
45
+ get_initializer,
46
+ keras_serializable,
47
+ unpack_inputs,
48
+ )
49
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
50
+ from ...utils import (
51
+ add_code_sample_docstrings,
52
+ add_start_docstrings,
53
+ add_start_docstrings_to_model_forward,
54
+ logging,
55
+ )
56
+ from .configuration_distilbert import DistilBertConfig
57
+
58
+
59
logger = logging.get_logger(__name__)

# Checkpoint / config names substituted into the auto-generated docstrings below.
_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
_CONFIG_FOR_DOC = "DistilBertConfig"

TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "distilbert-base-uncased",
    "distilbert-base-uncased-distilled-squad",
    "distilbert-base-cased",
    "distilbert-base-cased-distilled-squad",
    "distilbert-base-multilingual-cased",
    "distilbert-base-uncased-finetuned-sst-2-english",
    # See all DistilBERT models at https://huggingface.co/models?filter=distilbert
]
73
+
74
+
75
class TFEmbeddings(tf.keras.layers.Layer):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.dim = config.dim
        self.initializer_range = config.initializer_range
        self.max_position_embeddings = config.max_position_embeddings
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(rate=config.dropout)

    def build(self, input_shape=None):
        # Embedding tables are created as raw weights (not keras Embedding layers)
        # under explicit name scopes so variable names line up with the checkpoint.
        with tf.name_scope("word_embeddings"):
            self.weight = self.add_weight(
                name="weight",
                shape=[self.config.vocab_size, self.dim],
                initializer=get_initializer(initializer_range=self.initializer_range),
            )

        with tf.name_scope("position_embeddings"):
            self.position_embeddings = self.add_weight(
                name="embeddings",
                shape=[self.max_position_embeddings, self.dim],
                initializer=get_initializer(initializer_range=self.initializer_range),
            )

        # NOTE(review): the built-guard sits *after* the add_weight calls above, so a
        # second build() call would re-enter them — confirm this is intentional.
        if self.built:
            return
        self.built = True
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.dim])

    def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
        """
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        # At least one of input_ids / inputs_embeds must be given; when both are,
        # input_ids wins (it overwrites inputs_embeds below).
        assert not (input_ids is None and inputs_embeds is None)

        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if position_ids is None:
            # Default positions 0..seq_len-1, broadcast over the batch dimension.
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)

        position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
        final_embeddings = inputs_embeds + position_embeds
        final_embeddings = self.LayerNorm(inputs=final_embeddings)
        final_embeddings = self.dropout(inputs=final_embeddings, training=training)

        return final_embeddings
133
+
134
+
135
class TFMultiHeadSelfAttention(tf.keras.layers.Layer):
    """Multi-head self-attention layer of the DistilBERT transformer block."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.n_heads = config.n_heads
        self.dim = config.dim
        self.dropout = tf.keras.layers.Dropout(config.attention_dropout)
        self.output_attentions = config.output_attentions

        assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}"

        # Separate linear projections for query / key / value and the output.
        self.q_lin = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
        )
        self.k_lin = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
        )
        self.v_lin = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
        )
        self.out_lin = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
        )

        self.pruned_heads = set()
        self.config = config

    def prune_heads(self, heads):
        # Head pruning is not supported in the TF implementation.
        raise NotImplementedError

    def call(self, query, key, value, mask, head_mask, output_attentions, training=False):
        """
        Parameters:
            query: tf.Tensor(bs, seq_length, dim)
            key: tf.Tensor(bs, seq_length, dim)
            value: tf.Tensor(bs, seq_length, dim)
            mask: tf.Tensor(bs, seq_length)

        Returns:
            weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights context: tf.Tensor(bs,
            seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
        """
        bs, q_length, dim = shape_list(query)
        k_length = shape_list(key)[1]
        # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
        # assert key.size() == value.size()
        dim_per_head = int(self.dim / self.n_heads)
        dim_per_head = tf.cast(dim_per_head, dtype=tf.int32)
        mask_reshape = [bs, 1, 1, k_length]

        def shape(x):
            """separate heads"""
            return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))

        def unshape(x):
            """group heads"""
            return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))

        q = shape(self.q_lin(query))  # (bs, n_heads, q_length, dim_per_head)
        k = shape(self.k_lin(key))  # (bs, n_heads, k_length, dim_per_head)
        v = shape(self.v_lin(value))  # (bs, n_heads, k_length, dim_per_head)
        # Scale the query (not the scores) by 1/sqrt(dim_per_head); the float32
        # casts here force the score computation into float32, with k cast to match.
        q = tf.cast(q, dtype=tf.float32)
        q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32)))
        k = tf.cast(k, dtype=q.dtype)
        scores = tf.matmul(q, k, transpose_b=True)  # (bs, n_heads, q_length, k_length)
        mask = tf.reshape(mask, mask_reshape)  # (bs, n_heads, qlen, klen)
        # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)

        # Additive masking: positions where mask == 0 get a -1e30 bias so they
        # vanish after the softmax.
        mask = tf.cast(mask, dtype=scores.dtype)
        scores = scores - 1e30 * (1.0 - mask)
        weights = stable_softmax(scores, axis=-1)  # (bs, n_heads, qlen, klen)
        weights = self.dropout(weights, training=training)  # (bs, n_heads, qlen, klen)

        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask

        context = tf.matmul(weights, v)  # (bs, n_heads, qlen, dim_per_head)
        context = unshape(context)  # (bs, q_length, dim)
        context = self.out_lin(context)  # (bs, q_length, dim)

        if output_attentions:
            return (context, weights)
        else:
            return (context,)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Each projection maps from the model width `dim`; batch/seq are dynamic.
        if getattr(self, "q_lin", None) is not None:
            with tf.name_scope(self.q_lin.name):
                self.q_lin.build([None, None, self.config.dim])
        if getattr(self, "k_lin", None) is not None:
            with tf.name_scope(self.k_lin.name):
                self.k_lin.build([None, None, self.config.dim])
        if getattr(self, "v_lin", None) is not None:
            with tf.name_scope(self.v_lin.name):
                self.v_lin.build([None, None, self.config.dim])
        if getattr(self, "out_lin", None) is not None:
            with tf.name_scope(self.out_lin.name):
                self.out_lin.build([None, None, self.config.dim])
237
+
238
+
239
class TFFFN(tf.keras.layers.Layer):
    """Position-wise feed-forward block: dim -> hidden_dim -> dim, with dropout on the output."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.lin1 = tf.keras.layers.Dense(
            config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
        )
        self.lin2 = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
        )
        self.activation = get_tf_activation(config.activation)
        self.config = config

    def call(self, input, training=False):
        # expand -> nonlinearity -> project back -> dropout
        hidden = self.activation(self.lin1(input))
        hidden = self.lin2(hidden)
        return self.dropout(hidden, training=training)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build each dense layer with its expected input width (batch/seq dynamic).
        for dense, in_dim in ((getattr(self, "lin1", None), self.config.dim),
                              (getattr(self, "lin2", None), self.config.hidden_dim)):
            if dense is not None:
                with tf.name_scope(dense.name):
                    dense.build([None, None, in_dim])
269
+
270
+
271
class TFTransformerBlock(tf.keras.layers.Layer):
    """One DistilBERT encoder block: self-attention + FFN, each with a post-LayerNorm residual."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.n_heads = config.n_heads
        self.dim = config.dim
        self.hidden_dim = config.hidden_dim
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation = config.activation
        self.output_attentions = config.output_attentions

        assert (
            config.dim % config.n_heads == 0
        ), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}"

        self.attention = TFMultiHeadSelfAttention(config, name="attention")
        self.sa_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")

        self.ffn = TFFFN(config, name="ffn")
        self.output_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
        self.config = config

    def call(self, x, attn_mask, head_mask, output_attentions, training=False):  # removed: src_enc=None, src_len=None
        """
        Parameters:
            x: tf.Tensor(bs, seq_length, dim)
            attn_mask: tf.Tensor(bs, seq_length)

        Outputs: sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output:
        tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization.
        """
        # Self-Attention (query = key = value = x)
        sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training)
        if output_attentions:
            sa_output, sa_weights = sa_output  # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
        else:  # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
            # assert type(sa_output) == tuple
            sa_output = sa_output[0]
        # Residual connection + post-LayerNorm.
        sa_output = self.sa_layer_norm(sa_output + x)  # (bs, seq_length, dim)

        # Feed Forward Network
        ffn_output = self.ffn(sa_output, training=training)  # (bs, seq_length, dim)
        ffn_output = self.output_layer_norm(ffn_output + sa_output)  # (bs, seq_length, dim)

        # Attention weights, when requested, come *before* the hidden state in the tuple.
        output = (ffn_output,)
        if output_attentions:
            output = (sa_weights,) + output
        return output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "attention", None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, "sa_layer_norm", None) is not None:
            with tf.name_scope(self.sa_layer_norm.name):
                self.sa_layer_norm.build([None, None, self.config.dim])
        if getattr(self, "ffn", None) is not None:
            with tf.name_scope(self.ffn.name):
                self.ffn.build(None)
        if getattr(self, "output_layer_norm", None) is not None:
            with tf.name_scope(self.output_layer_norm.name):
                self.output_layer_norm.build([None, None, self.config.dim])
336
+
337
+
338
class TFTransformer(tf.keras.layers.Layer):
    """Stack of `n_layers` TFTransformerBlock encoder layers."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.n_layers = config.n_layers
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions

        self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)]

    def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
        # docstyle-ignore
        """
        Parameters:
            x: tf.Tensor(bs, seq_length, dim) Input sequence embedded.
            attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence.

        Returns:
            hidden_state: tf.Tensor(bs, seq_length, dim)
                Sequence of hidden states in the last (top) layer
            all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
                Tuple of length n_layers with the hidden states from each layer.
                Optional: only if output_hidden_states=True
            all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
                Tuple of length n_layers with the attention weights from each layer
                Optional: only if output_attentions=True
        """
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_state = x
        for i, layer_module in enumerate(self.layer):
            # Record the *input* to each layer; the final state is appended after the loop.
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)

            layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training)
            # The hidden state is always the last element of the block's output tuple.
            hidden_state = layer_outputs[-1]

            if output_attentions:
                assert len(layer_outputs) == 2
                attentions = layer_outputs[0]
                all_attentions = all_attentions + (attentions,)
            else:
                assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1"

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)
400
+
401
+
402
@keras_serializable
class TFDistilBertMainLayer(tf.keras.layers.Layer):
    # Headless DistilBERT: token embeddings followed by the transformer encoder.
    config_class = DistilBertConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.num_hidden_layers = config.num_hidden_layers
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.return_dict = config.use_return_dict

        self.embeddings = TFEmbeddings(config, name="embeddings")  # Embeddings
        self.transformer = TFTransformer(config, name="transformer")  # Encoder

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        # Swap the embedding matrix in place and keep the tracked vocab size in sync.
        self.embeddings.weight = value
        self.embeddings.vocab_size = value.shape[0]

    def _prune_heads(self, heads_to_prune):
        # Head pruning is not implemented for the TF DistilBERT port.
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if attention_mask is None:
            attention_mask = tf.ones(input_shape)  # (bs, seq_length)

        # NOTE(review): mask is always cast to float32 here, regardless of compute dtype.
        attention_mask = tf.cast(attention_mask, dtype=tf.float32)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_hidden_layers

        embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds)  # (bs, seq_length, dim)
        tfmr_output = self.transformer(
            embedding_output,
            attention_mask,
            head_mask,
            output_attentions,
            output_hidden_states,
            return_dict,
            training=training,
        )

        return tfmr_output  # last-layer hidden-state, (all hidden_states), (all attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
487
+
488
+
489
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class TFDistilBertPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Config class used by `from_pretrained` / `from_config`.
    config_class = DistilBertConfig
    # Attribute name under which derived classes store the headless base model.
    base_model_prefix = "distilbert"
498
+
499
+
500
+ DISTILBERT_START_DOCSTRING = r"""
501
+
502
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
503
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
504
+ etc.)
505
+
506
+ This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
507
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
508
+ behavior.
509
+
510
+ <Tip>
511
+
512
+ TensorFlow models and layers in `transformers` accept two formats as input:
513
+
514
+ - having all inputs as keyword arguments (like PyTorch models), or
515
+ - having all inputs as a list, tuple or dict in the first positional argument.
516
+
517
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
518
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
519
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
520
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
521
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
522
+ positional argument:
523
+
524
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
525
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
526
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
527
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
528
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
529
+
530
+ Note that when creating models and layers with
531
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
532
+ about any of this, as you can just pass inputs like you would to any other Python function!
533
+
534
+ </Tip>
535
+
536
+ Parameters:
537
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
538
+ Initializing with a config file does not load the weights associated with the model, only the
539
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
540
+ """
541
+
542
+ DISTILBERT_INPUTS_DOCSTRING = r"""
543
+ Args:
544
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
545
+ Indices of input sequence tokens in the vocabulary.
546
+
547
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
548
+ [`PreTrainedTokenizer.encode`] for details.
549
+
550
+ [What are input IDs?](../glossary#input-ids)
551
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
552
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
553
+
554
+ - 1 for tokens that are **not masked**,
555
+ - 0 for tokens that are **masked**.
556
+
557
+ [What are attention masks?](../glossary#attention-mask)
558
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
559
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
560
+
561
+ - 1 indicates the head is **not masked**,
562
+ - 0 indicates the head is **masked**.
563
+
564
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
565
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
566
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
567
+ model's internal embedding lookup matrix.
568
+ output_attentions (`bool`, *optional*):
569
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
570
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
571
+ config will be used instead.
572
+ output_hidden_states (`bool`, *optional*):
573
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
574
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
575
+ used instead.
576
+ return_dict (`bool`, *optional*):
577
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
578
+ eager mode, in graph mode the value will always be set to True.
579
+ training (`bool`, *optional*, defaults to `False`):
580
+ Whether or not to use the model in training mode (some modules like dropout modules have different
581
+ behaviors between training and evaluation).
582
+ """
583
+
584
+
585
@add_start_docstrings(
    "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertModel(TFDistilBertPreTrainedModel):
    """Headless DistilBERT model: a thin wrapper around `TFDistilBertMainLayer`."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")  # Embeddings

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # Delegate straight to the main layer, which handles defaulting and output shaping.
        return self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        base = getattr(self, "distilbert", None)
        if base is not None:
            with tf.name_scope(base.name):
                base.build(None)
631
+
632
+
633
class TFDistilBertLMHead(tf.keras.layers.Layer):
    """Masked-LM output head whose projection matrix is tied to the input embeddings."""

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.dim = config.dim

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings

    def build(self, input_shape):
        # Only the per-token bias belongs to this layer; the weight matrix is shared.
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)

    def get_output_embeddings(self):
        return self.input_embeddings

    def set_output_embeddings(self, value):
        # Because the weights are tied, replacing the output matrix replaces the input one too.
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self):
        return {"bias": self.bias}

    def set_bias(self, value):
        self.bias = value["bias"]
        self.config.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states):
        # (bs, seq, dim) -> flatten -> project onto vocab -> restore sequence axis.
        seq_len = shape_list(hidden_states)[1]
        flat_states = tf.reshape(hidden_states, [-1, self.dim])
        logits = tf.matmul(flat_states, self.input_embeddings.weight, transpose_b=True)
        logits = tf.reshape(logits, [-1, seq_len, self.config.vocab_size])
        return tf.nn.bias_add(logits, self.bias)
671
+
672
+
673
@add_start_docstrings(
    """DistilBert Model with a `masked language modeling` head on top.""",
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss):
    # Base model + (Dense -> activation -> LayerNorm -> tied-embedding projection) MLM head.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.config = config

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        self.vocab_transform = tf.keras.layers.Dense(
            config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
        )
        self.act = get_tf_activation(config.activation)
        self.vocab_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
        # The projector shares its weight matrix with the input embeddings.
        self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")

    def get_lm_head(self):
        return self.vocab_projector

    def get_prefix_bias_name(self):
        # Deprecated accessor kept for backward compatibility.
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return self.name + "/" + self.vocab_projector.name

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = distilbert_output[0]  # (bs, seq_length, dim)
        prediction_logits = self.vocab_transform(hidden_states)  # (bs, seq_length, dim)
        prediction_logits = self.act(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_layer_norm(prediction_logits)  # (bs, seq_length, dim)
        prediction_logits = self.vocab_projector(prediction_logits)

        loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits)

        if not return_dict:
            output = (prediction_logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return TFMaskedLMOutput(
            loss=loss,
            logits=prediction_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "vocab_transform", None) is not None:
            with tf.name_scope(self.vocab_transform.name):
                self.vocab_transform.build([None, None, self.config.dim])
        if getattr(self, "vocab_layer_norm", None) is not None:
            with tf.name_scope(self.vocab_layer_norm.name):
                self.vocab_layer_norm.build([None, None, self.config.dim])
        if getattr(self, "vocab_projector", None) is not None:
            with tf.name_scope(self.vocab_projector.name):
                self.vocab_projector.build(None)
767
+
768
+
769
@add_start_docstrings(
    """
    DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        # DistilBERT has no pooler; `pre_classifier` plays that role on the [CLS] state.
        self.pre_classifier = tf.keras.layers.Dense(
            config.dim,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="relu",
            name="pre_classifier",
        )
        self.classifier = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout)
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
        # Pool by taking the first ([CLS]) token's hidden state.
        pooled_output = hidden_state[:, 0]  # (bs, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
        pooled_output = self.dropout(pooled_output, training=training)  # (bs, dim)
        logits = self.classifier(pooled_output)  # (bs, dim)

        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "pre_classifier", None) is not None:
            with tf.name_scope(self.pre_classifier.name):
                self.pre_classifier.build([None, None, self.config.dim])
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.dim])
861
+
862
+
863
@add_start_docstrings(
    """
    DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss):
    # Base model + dropout + per-token linear classifier.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.classifier = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output, training=training)
        logits = self.classifier(sequence_output)  # (bs, seq_len, num_labels)
        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                # Consistency fix: every other head in this file builds with `config.dim`;
                # `config.hidden_size` resolves to the same value via DistilBertConfig's
                # attribute_map alias, so the built shape is unchanged.
                self.classifier.build([None, None, self.config.dim])
941
+
942
+
943
@add_start_docstrings(
    """
    DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss):
    # Inputs are (bs, num_choices, seq_len); choices are flattened into the batch axis,
    # scored independently, then the per-choice scores are reshaped back.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout)
        self.pre_classifier = tf.keras.layers.Dense(
            config.dim,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="relu",
            name="pre_classifier",
        )
        # One logit per choice; softmax over choices happens inside the loss.
        self.classifier = tf.keras.layers.Dense(
            1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(
        DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
            where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
        """
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
            seq_length = shape_list(input_ids)[2]
        else:
            num_choices = shape_list(inputs_embeds)[1]
            seq_length = shape_list(inputs_embeds)[2]

        # Collapse (bs, num_choices, ...) -> (bs * num_choices, ...) for a single forward pass.
        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
        flat_inputs_embeds = (
            tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
            if inputs_embeds is not None
            else None
        )
        distilbert_output = self.distilbert(
            flat_input_ids,
            flat_attention_mask,
            head_mask,
            flat_inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_state = distilbert_output[0]  # (bs, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs, dim)
        pooled_output = self.dropout(pooled_output, training=training)  # (bs, dim)
        logits = self.classifier(pooled_output)
        # Restore the choice axis: (bs * num_choices, 1) -> (bs, num_choices).
        reshaped_logits = tf.reshape(logits, (-1, num_choices))

        loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)

        if not return_dict:
            output = (reshaped_logits,) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return TFMultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "pre_classifier", None) is not None:
            with tf.name_scope(self.pre_classifier.name):
                self.pre_classifier.build([None, None, self.config.dim])
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.dim])
1050
+
1051
+
1052
@add_start_docstrings(
    """
    DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DISTILBERT_START_DOCSTRING,
)
class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
        # Two outputs per token: span-start and span-end logits.
        self.qa_outputs = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )
        assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2"
        self.dropout = tf.keras.layers.Dropout(config.qa_dropout)
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        start_positions: np.ndarray | tf.Tensor | None = None,
        end_positions: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        distilbert_output = self.distilbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = distilbert_output[0]  # (bs, max_query_len, dim)
        hidden_states = self.dropout(hidden_states, training=training)  # (bs, max_query_len, dim)
        logits = self.qa_outputs(hidden_states)  # (bs, max_query_len, 2)
        # Split the 2-channel output into separate start/end logit tensors of shape (bs, max_query_len).
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)

        loss = None
        if start_positions is not None and end_positions is not None:
            # The QA loss helper expects a dict with these exact keys.
            labels = {"start_position": start_positions}
            labels["end_position"] = end_positions
            loss = self.hf_compute_loss(labels, (start_logits, end_logits))

        if not return_dict:
            output = (start_logits, end_logits) + distilbert_output[1:]
            return ((loss,) + output) if loss is not None else output

        return TFQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=distilbert_output.hidden_states,
            attentions=distilbert_output.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "distilbert", None) is not None:
            with tf.name_scope(self.distilbert.name):
                self.distilbert.build(None)
        if getattr(self, "qa_outputs", None) is not None:
            with tf.name_scope(self.qa_outputs.name):
                self.qa_outputs.build([None, None, self.config.dim])
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py ADDED
@@ -0,0 +1,553 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for DistilBERT."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

# Canonical hub locations of the vocabulary files for the legacy shortcut names.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt",
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt",
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt",
    }
}

# All DistilBERT checkpoints share the 512-position limit of the original BERT models.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}


# Per-checkpoint tokenizer defaults: only the "uncased" models lowercase their input.
PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
65
+
66
+
67
# Copied from transformers.models.bert.tokenization_bert.load_vocab
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary.

    Each line of the file is one token; its 0-based line number becomes its id.
    Only the trailing newline is stripped, so tokens keep other whitespace.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            vocab[line.rstrip("\n")] = index
    return vocab
77
+
78
+
79
# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
def whitespace_tokenize(text):
    """Strip surrounding whitespace and split *text* on runs of whitespace.

    Returns an empty list for empty or whitespace-only input.
    """
    # str.split() with no argument already collapses whitespace runs and
    # returns [] for the empty string, so strip+split covers every case.
    return text.strip().split()
87
+
88
+
89
class DistilBertTokenizer(PreTrainedTokenizer):
    r"""
    DistilBERT tokenizer, based on WordPiece (identical behaviour to the BERT tokenizer).

    Inherits from [`PreTrainedTokenizer`], which implements most of the main methods;
    refer to that superclass for the shared API.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file (one token per line).
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to run basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Tokens that must never be split; only used when `do_basic_tokenize=True`.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            Token substituted for anything not in the vocabulary.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            Separator token, also used as the last token of a built sequence.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            Token used for padding batches of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            Classifier token, placed first in every built sequence.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            Token used for masked-language-modeling targets.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether to surround CJK characters with spaces. Should likely be
            deactivated for Japanese (see
            https://github.com/huggingface/transformers/issues/328).
        strip_accents (`bool`, *optional*):
            Whether to strip accents. When unset, follows `do_lower_case`
            (as in the original BERT).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping for id -> token lookups; insertion order mirrors the vocab file.
        self.ids_to_tokens = collections.OrderedDict((index, token) for token, index in self.vocab.items())
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        # The base class registers special tokens and may add tokens, so the
        # sub-tokenizers must exist before it runs.
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

    @property
    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
    def do_lower_case(self):
        # The flag lives on the basic tokenizer; expose it read-only here.
        return self.basic_tokenizer.do_lower_case

    @property
    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
    def vocab_size(self):
        return len(self.vocab)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
    def get_vocab(self):
        # Base vocabulary first, then any tokens added after initialization.
        return dict(self.vocab, **self.added_tokens_encoder)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
    def _tokenize(self, text, split_special_tokens=False):
        if not self.do_basic_tokenize:
            return self.wordpiece_tokenizer.tokenize(text)

        keep_intact = self.all_special_tokens if not split_special_tokens else None
        split_tokens = []
        for word in self.basic_tokenizer.tokenize(text, never_split=keep_intact):
            if word in self.basic_tokenizer.never_split:
                # Protected tokens bypass WordPiece entirely.
                split_tokens.append(word)
            else:
                split_tokens.extend(self.wordpiece_tokenizer.tokenize(word))
        return split_tokens

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Join wordpieces into one string, removing the '##' continuation markers."""
        return " ".join(tokens).replace(" ##", "").strip()

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs by adding special tokens. A BERT sequence has the format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`): IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*): Second list of IDs for sequence pairs.

        Returns:
            `List[int]`: Input IDs with the appropriate special tokens.
        """
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve a special-tokens mask (1 for special tokens, 0 for sequence
        tokens) for token lists *without* special tokens added yet. Called by
        the tokenizer's `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`): List of IDs.
            token_ids_1 (`List[int]`, *optional*): Second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether the lists are already formatted with special tokens.

        Returns:
            `List[int]`: Integers in [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            # The base class can detect special tokens directly in the ids.
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        mask = [1] + [0] * len(token_ids_0) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create token-type ids for a sequence pair. A BERT pair mask looks like:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, only the first portion (all zeros) is returned.

        Args:
            token_ids_0 (`List[int]`): List of IDs.
            token_ids_1 (`List[int]`, *optional*): Second list of IDs for sequence pairs.

        Returns:
            `List[int]`: Token type IDs according to the given sequence(s).
        """
        # First segment is cls + tokens_0 + sep; second (if any) is tokens_1 + sep.
        first_segment = [0] * (len(token_ids_0) + 2)
        if token_ids_1 is None:
            return first_segment
        return first_segment + [1] * (len(token_ids_1) + 1)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary, one token per line ordered by index, and return its path."""
        prefix = filename_prefix + "-" if filename_prefix else ""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["vocab_file"])
        else:
            # A non-directory argument is treated as the target file name itself.
            vocab_file = prefix + save_directory
        expected_index = 0
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if expected_index != token_index:
                    # A gap means load-time ids will not match; warn but keep writing.
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    expected_index = token_index
                writer.write(token + "\n")
                expected_index += 1
        return (vocab_file,)
334
+
335
+
336
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer(object):
    """
    Runs basic tokenization: text cleaning, optional lower-casing and accent
    stripping, optional CJK-character handling, and punctuation splitting.

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Tokens that must never be split during tokenization.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether to surround CJK characters with spaces. Should likely be
            deactivated for Japanese (see
            https://github.com/huggingface/transformers/issues/328).
        strip_accents (`bool`, *optional*):
            Whether to strip accents. When unset, follows `do_lower_case`
            (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            When `False`, punctuation splitting is skipped so later tokenization
            can see full words such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split or [])
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic tokenization of a piece of text; no sub-word splitting (see
        WordPieceTokenizer for that).

        Args:
            never_split (`List[str]`, *optional*):
                Kept for backward compatibility (now handled at the base-class
                level, see [`PreTrainedTokenizer.tokenize`]); extra tokens not
                to split, merged with the instance-level set.
        """
        # union() leaves self.never_split untouched and merges the per-call extras.
        protected = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # Added November 2018 for the multilingual and Chinese models; also applied
        # to the English models, which is harmless since they saw essentially no
        # Chinese data in training (Chinese characters appear in their vocabularies
        # only because English Wikipedia contains some Chinese words).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # NFC-normalize so the same character written with different codepoint
        # sequences compares equal.
        text = unicodedata.normalize("NFC", text)

        split_tokens = []
        for token in whitespace_tokenize(text):
            if token not in protected:
                if self.do_lower_case:
                    token = token.lower()
                # strip accents when explicitly requested, or by default when
                # lower-casing (original BERT behaviour).
                if (self.do_lower_case and self.strip_accents is not False) or (
                    not self.do_lower_case and self.strip_accents
                ):
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, protected))

        return whitespace_tokenize(" ".join(split_tokens))

    def _run_strip_accents(self, text):
        """Remove combining marks (Unicode category 'Mn') after NFD decomposition."""
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text, never_split=None):
        """Split punctuation characters into standalone tokens; protected tokens pass through whole."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        pieces = []
        word_open = False  # True while the last piece is a growing non-punctuation run
        for ch in text:
            if _is_punctuation(ch):
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return ["".join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Add whitespace around every CJK character so they tokenize individually."""
        return "".join(f" {ch} " if self._is_chinese_char(ord(ch)) else ch for ch in text)

    def _is_chinese_char(self, cp):
        """Return True if codepoint *cp* lies in a CJK ideograph Unicode block."""
        # "Chinese character" here means the CJK Unified Ideographs blocks:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        # Note these blocks do NOT cover all Japanese and Korean characters:
        # Hangul, Hiragana and Katakana live elsewhere and are space-separated
        # scripts, so they are handled like the other languages.
        cjk_blocks = (
            (0x4E00, 0x9FFF),
            (0x3400, 0x4DBF),
            (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F),
            (0x2B740, 0x2B81F),
            (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF),
            (0x2F800, 0x2FA1F),
        )
        return any(low <= cp <= high for low, high in cjk_blocks)

    def _clean_text(self, text):
        """Drop NUL/replacement/control characters and map all whitespace to ' '."""
        cleaned = []
        for ch in text:
            code = ord(ch)
            if code == 0 or code == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
496
+
497
+
498
# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenization over a fixed vocabulary."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this collapse straight to the unknown token.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenize a piece of text into its word pieces using a greedy
        longest-match-first algorithm against the given vocabulary.

        For example, `input = "unaffable"` will return as output
        `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for word in whitespace_tokenize(text):
            if len(word) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            pieces = self._greedy_pieces(word)
            if pieces is None:
                # Some span of the word could not be matched at all.
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens

    def _greedy_pieces(self, word):
        """Longest-match-first scan; returns None when any span is unmatchable."""
        pieces = []
        start = 0
        while start < len(word):
            end = len(word)
            match = None
            # Shrink the candidate from the right until it is in the vocab.
            while start < end:
                candidate = word[start:end]
                if start > 0:
                    candidate = "##" + candidate  # continuation-piece marker
                if candidate in self.vocab:
                    match = candidate
                    break
                end -= 1
            if match is None:
                return None
            pieces.append(match)
            start = end
        return pieces
llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for DistilBERT."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_distilbert import DistilBertTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+ PRETRAINED_VOCAB_FILES_MAP = {
32
+ "vocab_file": {
33
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
34
+ "distilbert-base-uncased-distilled-squad": (
35
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
36
+ ),
37
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
38
+ "distilbert-base-cased-distilled-squad": (
39
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
40
+ ),
41
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
42
+ "distilbert-base-multilingual-cased": (
43
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
44
+ ),
45
+ },
46
+ "tokenizer_file": {
47
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
48
+ "distilbert-base-uncased-distilled-squad": (
49
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
50
+ ),
51
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
52
+ "distilbert-base-cased-distilled-squad": (
53
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
54
+ ),
55
+ "distilbert-base-german-cased": (
56
+ "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
57
+ ),
58
+ "distilbert-base-multilingual-cased": (
59
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
60
+ ),
61
+ },
62
+ }
63
+
64
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
65
+ "distilbert-base-uncased": 512,
66
+ "distilbert-base-uncased-distilled-squad": 512,
67
+ "distilbert-base-cased": 512,
68
+ "distilbert-base-cased-distilled-squad": 512,
69
+ "distilbert-base-german-cased": 512,
70
+ "distilbert-base-multilingual-cased": 512,
71
+ }
72
+
73
+
74
+ PRETRAINED_INIT_CONFIGURATION = {
75
+ "distilbert-base-uncased": {"do_lower_case": True},
76
+ "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
77
+ "distilbert-base-cased": {"do_lower_case": False},
78
+ "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
79
+ "distilbert-base-german-cased": {"do_lower_case": False},
80
+ "distilbert-base-multilingual-cased": {"do_lower_case": False},
81
+ }
82
+
83
+
84
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    "Fast" DistilBERT tokenizer backed by HuggingFace's *tokenizers* library,
    based on WordPiece.

    Inherits from [`PreTrainedTokenizerFast`], which implements most of the main
    methods; refer to that superclass for the shared API.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            Token substituted for anything not in the vocabulary.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            Separator token, also used as the last token of a built sequence.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            Token used for padding batches of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            Classifier token, placed first in every built sequence.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            Token used for masked-language-modeling targets.
        clean_text (`bool`, *optional*, defaults to `True`):
            Whether to clean the text before tokenization by removing control
            characters and normalizing all whitespace.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether to tokenize Chinese characters; should likely be deactivated
            for Japanese (see https://github.com/huggingface/transformers/issues/328).
        strip_accents (`bool`, *optional*):
            Whether to strip accents. When unset, follows `do_lower_case`
            (as in the original BERT).
        wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
            The prefix for subwords.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer when the serialized tokenizer.json
        # disagrees with the options requested here.
        state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        wanted = {
            "lowercase": do_lower_case,
            "strip_accents": strip_accents,
            "handle_chinese_chars": tokenize_chinese_chars,
        }
        if any(state.get(key, value) != value for key, value in wanted.items()):
            normalizer_class = getattr(normalizers, state.pop("type"))
            state.update(wanted)
            self.backend_tokenizer.normalizer = normalizer_class(**state)

        self.do_lower_case = do_lower_case

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs by adding special tokens. A BERT sequence has the format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`): IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*): Second list of IDs for sequence pairs.

        Returns:
            `List[int]`: Input IDs with the appropriate special tokens.
        """
        ids = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            ids = ids + token_ids_1 + [self.sep_token_id]
        return ids

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create token-type ids for a sequence pair. A BERT pair mask looks like:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, only the first portion (all zeros) is returned.

        Args:
            token_ids_0 (`List[int]`): List of IDs.
            token_ids_1 (`List[int]`, *optional*): Second list of IDs for sequence pairs.

        Returns:
            `List[int]`: Token type IDs according to the given sequence(s).
        """
        # First segment is cls + tokens_0 + sep; second (if any) is tokens_1 + sep.
        first_segment = [0] * (len(token_ids_0) + 2)
        if token_ids_1 is None:
            return first_segment
        return first_segment + [1] * (len(token_ids_1) + 1)

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Delegate to the backend model's save and return the written file names."""
        return tuple(self._tokenizer.model.save(save_directory, name=filename_prefix))
llava_next/lib/python3.10/site-packages/transformers/models/phobert/__init__.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Maps submodule name -> public names it exports; consumed by `_LazyModule` below
# so that `tokenization_phobert` is only imported on first attribute access.
_import_structure = {"tokenization_phobert": ["PhobertTokenizer"]}


if TYPE_CHECKING:
    # Static type checkers see the real import; at runtime this branch is skipped.
    from .tokenization_phobert import PhobertTokenizer

else:
    import sys

    # Replace this module with a lazy proxy that defers the submodule import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (498 Bytes). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/tokenization_phobert.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/phobert/tokenization_phobert.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
3
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Tokenization classes for PhoBERT"""
17
+
18
+
19
+ import os
20
+ import re
21
+ from shutil import copyfile
22
+ from typing import List, Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
logger = logging.get_logger(__name__)

# File names under which a PhoBERT vocabulary is saved/loaded: a token list and
# a fastBPE merge-codes file (see `PhobertTokenizer.add_from_file` / `bpe`).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

# Download locations of the pretrained vocabulary files on the Hugging Face Hub.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

# Maximum input length (in tokens) accepted by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
51
+
52
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).

    Args:
        word: Tuple of symbol strings.

    Returns:
        `set`: Every adjacent `(previous_symbol, next_symbol)` pair in `word`. Empty for words
        with fewer than two symbols.
    """
    # zip(word, word[1:]) yields each adjacent pair directly; this replaces the
    # original manual prev/next loop and its redundant `pairs = set(pairs)` copy,
    # and is also safe for an empty tuple (the old code indexed `word[0]`).
    return set(zip(word, word[1:]))
66
+
67
+
68
class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer. Based on Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # The special tokens always occupy ids 0-3; the rest of the vocabulary is
        # appended from `vocab_file` in file order by `add_from_file`.
        self.encoder = {}
        self.encoder[str(bos_token)] = 0
        self.encoder[str(pad_token)] = 1
        self.encoder[str(eos_token)] = 2
        self.encoder[str(unk_token)] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        # Each merges line is "<first> <second> <count>"; drop the trailing count.
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]

        # Lower rank = higher merge priority, matching fastBPE semantics.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A PhoBERT sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """

        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        """Size of the base vocabulary (excluding tokens added after loading)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single token and return its subwords joined by `"@@ "`."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the last symbol with the end-of-word suffix used by the merge table.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Greedily apply the highest-priority (lowest-rank) merge still available.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Strip the trailing "</w>" end-of-word marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        # Split on whitespace (PhoBERT expects pre-segmented Vietnamese input),
        # then BPE-encode each whitespace-delimited token.
        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and merge files to `save_directory`.

        Returns the `(vocab_file, merges_file)` paths, or `None` when `save_directory` is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Bug fix: this branch previously called `self.sp_model`, an attribute this
            # tokenizer never defines (dead code copied from a sentencepiece-based
            # tokenizer), so saving without the original vocab file raised AttributeError.
            # Instead, re-serialize the in-memory vocabulary in the "<token> <count>"
            # format that `add_from_file` expects, skipping the four special tokens
            # that `__init__` re-inserts at ids 0-3 on load.
            special_token_ids = {0, 1, 2, 3}  # bos, pad, eos, unk
            with open(out_vocab_file, "w", encoding="utf-8") as writer:
                for token, token_index in sorted(self.encoder.items(), key=lambda kv: kv[1]):
                    if token_index in special_token_ids:
                        continue
                    writer.write(f"{token} 1\n")

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        # `f` is an open file handle: each line is "<token> <count>"; the count is
        # discarded and ids are assigned in file order after the special tokens.
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.25 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc ADDED
Binary file (26.6 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc ADDED
Binary file (3.66 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py ADDED
@@ -0,0 +1,666 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """RAG Retriever model implementation."""
16
+
17
+ import os
18
+ import pickle
19
+ import time
20
+ from typing import Iterable, List, Optional, Tuple
21
+
22
+ import numpy as np
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...tokenization_utils_base import BatchEncoding
26
+ from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
27
+ from .configuration_rag import RagConfig
28
+ from .tokenization_rag import RagTokenizer
29
+
30
+
31
+ if is_datasets_available():
32
+ from datasets import Dataset, load_dataset, load_from_disk
33
+
34
+ if is_faiss_available():
35
+ import faiss
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/"
42
+
43
+
44
class Index:
    """
    Abstract interface for the document indices used by [`RagRetriever`].

    Concrete subclasses must implement all four methods below; every one of them
    raises `NotImplementedError` here.
    """

    def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
        """
        Look up the retrieved documents and return one dictionary per query, each containing the titles and
        texts of its documents.

        Args:
            doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):
                A tensor of document indices.
        """
        raise NotImplementedError

    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
        """
        Retrieve the `n_docs` best-matching documents for every query vector in the batch.

        Args:
            question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
                An array of query vectors.
            n_docs (`int`):
                The number of docs retrieved per query.

        Returns:
            `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of
            shape `(batch_size, vector_size)`: A tensor of vector representations of retrieved documents.
        """
        raise NotImplementedError

    def is_initialized(self):
        """
        Whether the index has already been loaded into memory (`True` once `init_index` has run).
        """
        raise NotImplementedError

    def init_index(self):
        """
        Load the index into memory. Should be called only once per training run of a RAG model; e.g. in a
        distributed multi-GPU setup only one of the workers loads the index.
        """
        raise NotImplementedError
88
+
89
+
90
class LegacyIndex(Index):
    """
    An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use
    default faiss index parameters as specified in that repository.

    Args:
        vector_size (`int`):
            The dimension of indexed vectors.
        index_path (`str`):
            A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`]
    """

    # Fixed file names produced by the DPR indexing scripts.
    INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index"
    PASSAGE_FILENAME = "psgs_w100.tsv.pkl"

    def __init__(self, vector_size, index_path):
        # Maps faiss-internal row ids back to the database ids stored alongside the index.
        self.index_id_to_db_id = []
        self.index_path = index_path
        # Passages are loaded eagerly; the faiss index itself is deferred to `init_index`.
        self.passages = self._load_passages()
        self.vector_size = vector_size
        self.index = None
        self._index_initialized = False

    def _resolve_path(self, index_path, filename):
        """Resolve `filename` inside `index_path` (local dir or remote repo) to a concrete file path."""
        is_local = os.path.isdir(index_path)
        try:
            # Load from URL or cache if already cached
            resolved_archive_file = cached_file(index_path, filename)
        except EnvironmentError:
            # NOTE(review): these messages contain the literal "(unknown)" where the requested
            # filename presumably belonged — looks like a lost `{filename}` interpolation; confirm upstream.
            msg = (
                f"Can't load '(unknown)'. Make sure that:\n\n"
                f"- '{index_path}' is a correct remote path to a directory containing a file named (unknown)\n\n"
                f"- or '{index_path}' is the correct path to a directory containing a file named (unknown).\n\n"
            )
            raise EnvironmentError(msg)
        if is_local:
            logger.info(f"loading file {resolved_archive_file}")
        else:
            logger.info(f"loading file (unknown) from cache at {resolved_archive_file}")
        return resolved_archive_file

    def _load_passages(self):
        """Unpickle the passages file; refuses to run unless TRUST_REMOTE_CODE is set (pickle is unsafe)."""
        logger.info(f"Loading passages from {self.index_path}")
        passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME)
        if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
            raise ValueError(
                "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
                "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
                "that could have been tampered with. If you already verified the pickle data and decided to use it, "
                "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
            )
        with open(passages_path, "rb") as passages_file:
            passages = pickle.load(passages_file)
        return passages

    def _deserialize_index(self):
        """Read the serialized faiss index and its row-id -> db-id metadata from disk."""
        logger.info(f"Loading index from {self.index_path}")
        resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr")
        self.index = faiss.read_index(resolved_index_path)
        resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr")
        if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
            raise ValueError(
                "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
                "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
                "that could have been tampered with. If you already verified the pickle data and decided to use it, "
                "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
            )
        with open(resolved_meta_path, "rb") as metadata_file:
            self.index_id_to_db_id = pickle.load(metadata_file)
        assert (
            len(self.index_id_to_db_id) == self.index.ntotal
        ), "Deserialized index_id_to_db_id should match faiss index size"

    def is_initialized(self):
        return self._index_initialized

    def init_index(self):
        # DPR indices store vectors with one extra auxiliary dimension (hence vector_size + 1);
        # `read_index` in `_deserialize_index` then replaces this pre-configured shell.
        index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)
        index.hnsw.efSearch = 128
        index.hnsw.efConstruction = 200
        self.index = index
        self._deserialize_index()
        self._index_initialized = True

    def get_doc_dicts(self, doc_ids: np.ndarray):
        # First gather the raw (text, title) passage tuples per query...
        doc_list = []
        for doc_ids_i in doc_ids:
            ids = [str(int(doc_id)) for doc_id in doc_ids_i]
            docs = [self.passages[doc_id] for doc_id in ids]
            doc_list.append(docs)
        # ...then reshape them into {"title": [...], "text": [...]} dictionaries.
        doc_dicts = []
        for docs in doc_list:
            doc_dict = {}
            doc_dict["title"] = [doc[1] for doc in docs]
            doc_dict["text"] = [doc[0] for doc in docs]
            doc_dicts.append(doc_dict)
        return doc_dicts

    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
        # Append a zero auxiliary dimension to each query to match the (vector_size + 1)-dim index layout.
        aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1)
        query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim))
        _, docs_ids = self.index.search(query_nhsw_vectors, n_docs)
        # Reconstruct the stored vectors, dropping the auxiliary dimension ([:-1]).
        vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids]
        # Translate faiss row ids back to database ids.
        ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
        return np.array(ids), np.array(vectors)
195
+
196
+
197
class HFIndexBase(Index):
    """Shared base for indices backed by a `datasets.Dataset` with `title`, `text` and `embeddings` columns."""

    def __init__(self, vector_size, dataset, index_initialized=False):
        self.vector_size = vector_size
        self.dataset = dataset
        self._index_initialized = index_initialized
        # Validate the schema up front (and the faiss index too, when one is expected).
        self._check_dataset_format(with_index=index_initialized)
        # Keep embeddings as float32 numpy arrays; other columns keep their native types.
        dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32")

    def _check_dataset_format(self, with_index: bool):
        """Raise `ValueError` unless `self.dataset` has the expected columns (and faiss index if `with_index`)."""
        if not isinstance(self.dataset, Dataset):
            raise ValueError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}")
        if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0:
            raise ValueError(
                "Dataset should be a dataset with the following columns: "
                "title (str), text (str) and embeddings (arrays of dimension vector_size), "
                f"but got columns {self.dataset.column_names}"
            )
        if with_index and "embeddings" not in self.dataset.list_indexes():
            raise ValueError(
                "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it "
                "or `dataset.load_faiss_index` to load one from the disk."
            )

    def init_index(self):
        # Loading strategy differs per subclass (canonical vs custom), so it stays abstract here.
        raise NotImplementedError()

    def is_initialized(self):
        return self._index_initialized

    def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
        # One dataset row-batch lookup per query in the batch.
        return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]

    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
        _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs)
        # faiss pads missing hits with negative ids; filter those out before the row lookup.
        docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
        vectors = [doc["embeddings"] for doc in docs]
        for i in range(len(vectors)):
            if len(vectors[i]) < n_docs:
                # Pad with zero vectors so every query yields exactly n_docs embeddings.
                vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
        return np.array(ids), np.array(vectors)  # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
237
+
238
+
239
class CanonicalHFIndex(HFIndexBase):
    """
    A wrapper around an instance of [`~datasets.Datasets`]. If `index_path` is set to `None`, we load the pre-computed
    index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path
    on disk.

    Args:
        vector_size (`int`): the dimension of the passages embeddings used by the index
        dataset_name (`str`, optional, defaults to `wiki_dpr`):
            A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
            with `datasets.list_datasets()`).
        dataset_split (`str`, optional, defaults to `train`):
            Which split of the `dataset` to load.
        index_name (`str`, optional, defaults to `train`):
            The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved
            under this name.
        index_path (`str`, optional, defaults to `None`):
            The path to the serialized faiss index on disk.
        use_dummy_dataset (`bool`, optional, defaults to `False`):
            If True, use the dummy configuration of the dataset for tests.
    """

    def __init__(
        self,
        vector_size: int,
        dataset_name: str = "wiki_dpr",
        dataset_split: str = "train",
        index_name: Optional[str] = None,
        index_path: Optional[str] = None,
        use_dummy_dataset=False,
    ):
        # Exactly one of index_name / index_path must be provided (mutually exclusive).
        if int(index_path is None) + int(index_name is None) != 1:
            raise ValueError("Please provide `index_name` or `index_path`.")
        self.dataset_name = dataset_name
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        logger.info(f"Loading passages from {self.dataset_name}")
        # NOTE(review): `with_index` / `dummy` are kwargs consumed by the wiki_dpr dataset
        # loading script, not by `load_dataset` itself — verify against the pinned datasets version.
        dataset = load_dataset(
            self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset
        )
        super().__init__(vector_size, dataset, index_initialized=False)

    def init_index(self):
        if self.index_path is not None:
            # A serialized faiss index was provided: attach it to the already-loaded dataset.
            logger.info(f"Loading index from {self.index_path}")
            self.dataset.load_faiss_index("embeddings", file=self.index_path)
        else:
            # Otherwise re-load the dataset with its pre-computed embeddings and named index.
            logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}")
            self.dataset = load_dataset(
                self.dataset_name,
                with_embeddings=True,
                with_index=True,
                split=self.dataset_split,
                index_name=self.index_name,
                dummy=self.use_dummy_dataset,
            )
            self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True)
        self._index_initialized = True
300
+
301
+ class CustomHFIndex(HFIndexBase):
302
+ """
303
+ A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the
304
+ indicated paths on disk.
305
+
306
+ Args:
307
+ vector_size (`int`): the dimension of the passages embeddings used by the index
308
+ dataset_path (`str`):
309
+ The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
310
+ embeddings (arrays of dimension vector_size)
311
+ index_path (`str`)
312
+ The path to the serialized faiss index on disk.
313
+ """
314
+
315
+ def __init__(self, vector_size: int, dataset, index_path=None):
316
+ super().__init__(vector_size, dataset, index_initialized=index_path is None)
317
+ self.index_path = index_path
318
+
319
+ @classmethod
320
+ def load_from_disk(cls, vector_size, dataset_path, index_path):
321
+ logger.info(f"Loading passages from {dataset_path}")
322
+ if dataset_path is None or index_path is None:
323
+ raise ValueError(
324
+ "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` "
325
+ "and `dataset.get_index('embeddings').save(index_path)`."
326
+ )
327
+ dataset = load_from_disk(dataset_path)
328
+ return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)
329
+
330
+ def init_index(self):
331
+ if not self.is_initialized():
332
+ logger.info(f"Loading index from {self.index_path}")
333
+ self.dataset.load_faiss_index("embeddings", file=self.index_path)
334
+ self._index_initialized = True
335
+
336
+
337
+ class RagRetriever:
338
+ """
339
+ Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents
340
+ contents, and it formats them to be used with a RagModel.
341
+
342
+ Args:
343
+ config ([`RagConfig`]):
344
+ The configuration of the RAG model this Retriever is used with. Contains parameters indicating which
345
+ `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical
346
+ one (default) from the datasets library with `config.index_name="wiki_dpr"` for example.
347
+ question_encoder_tokenizer ([`PreTrainedTokenizer`]):
348
+ The tokenizer that was used to tokenize the question. It is used to decode the question and then use the
349
+ generator_tokenizer.
350
+ generator_tokenizer ([`PreTrainedTokenizer`]):
351
+ The tokenizer used for the generator part of the RagModel.
352
+ index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration):
353
+ If specified, use this index instead of the one built using the configuration
354
+
355
+ Examples:
356
+
357
+ ```python
358
+ >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact')
359
+ >>> from transformers import RagRetriever
360
+
361
+ >>> retriever = RagRetriever.from_pretrained(
362
+ ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed"
363
+ ... )
364
+
365
+ >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py
366
+ >>> from transformers import RagRetriever
367
+
368
+ >>> dataset = (
369
+ ... ...
370
+ ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index
371
+ >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset)
372
+
373
+ >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py
374
+ >>> from transformers import RagRetriever
375
+
376
+ >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)*
377
+ >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)*
378
+ >>> retriever = RagRetriever.from_pretrained(
379
+ ... "facebook/dpr-ctx_encoder-single-nq-base",
380
+ ... index_name="custom",
381
+ ... passages_path=dataset_path,
382
+ ... index_path=index_path,
383
+ ... )
384
+
385
+ >>> # To load the legacy index built originally for Rag's paper
386
+ >>> from transformers import RagRetriever
387
+
388
+ >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy")
389
+ ```"""
390
+
391
+ def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
392
+ self._init_retrieval = init_retrieval
393
+ requires_backends(self, ["datasets", "faiss"])
394
+ super().__init__()
395
+ self.index = index or self._build_index(config)
396
+ self.generator_tokenizer = generator_tokenizer
397
+ self.question_encoder_tokenizer = question_encoder_tokenizer
398
+
399
+ self.n_docs = config.n_docs
400
+ self.batch_size = config.retrieval_batch_size
401
+
402
+ self.config = config
403
+ if self._init_retrieval:
404
+ self.init_retrieval()
405
+
406
+ self.ctx_encoder_tokenizer = None
407
+ self.return_tokenized_docs = False
408
+
409
+ @staticmethod
410
+ def _build_index(config):
411
+ if config.index_name == "legacy":
412
+ return LegacyIndex(
413
+ config.retrieval_vector_size,
414
+ config.index_path or LEGACY_INDEX_PATH,
415
+ )
416
+ elif config.index_name == "custom":
417
+ return CustomHFIndex.load_from_disk(
418
+ vector_size=config.retrieval_vector_size,
419
+ dataset_path=config.passages_path,
420
+ index_path=config.index_path,
421
+ )
422
+ else:
423
+ return CanonicalHFIndex(
424
+ vector_size=config.retrieval_vector_size,
425
+ dataset_name=config.dataset,
426
+ dataset_split=config.dataset_split,
427
+ index_name=config.index_name,
428
+ index_path=config.index_path,
429
+ use_dummy_dataset=config.use_dummy_dataset,
430
+ )
431
+
432
+ @classmethod
433
+ def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
434
+ requires_backends(cls, ["datasets", "faiss"])
435
+ config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
436
+ rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
437
+ question_encoder_tokenizer = rag_tokenizer.question_encoder
438
+ generator_tokenizer = rag_tokenizer.generator
439
+ if indexed_dataset is not None:
440
+ config.index_name = "custom"
441
+ index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
442
+ else:
443
+ index = cls._build_index(config)
444
+ return cls(
445
+ config,
446
+ question_encoder_tokenizer=question_encoder_tokenizer,
447
+ generator_tokenizer=generator_tokenizer,
448
+ index=index,
449
+ )
450
+
451
+ def save_pretrained(self, save_directory):
452
+ if isinstance(self.index, CustomHFIndex):
453
+ if self.config.index_path is None:
454
+ index_path = os.path.join(save_directory, "hf_dataset_index.faiss")
455
+ self.index.dataset.get_index("embeddings").save(index_path)
456
+ self.config.index_path = index_path
457
+ if self.config.passages_path is None:
458
+ passages_path = os.path.join(save_directory, "hf_dataset")
459
+ # datasets don't support save_to_disk with indexes right now
460
+ faiss_index = self.index.dataset._indexes.pop("embeddings")
461
+ self.index.dataset.save_to_disk(passages_path)
462
+ self.index.dataset._indexes["embeddings"] = faiss_index
463
+ self.config.passages_path = passages_path
464
+ self.config.save_pretrained(save_directory)
465
+ rag_tokenizer = RagTokenizer(
466
+ question_encoder=self.question_encoder_tokenizer,
467
+ generator=self.generator_tokenizer,
468
+ )
469
+ rag_tokenizer.save_pretrained(save_directory)
470
+
471
+ def init_retrieval(self):
472
+ """
473
+ Retriever initialization function. It loads the index into memory.
474
+ """
475
+
476
+ logger.info("initializing retrieval")
477
+ self.index.init_index()
478
+
479
+ def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
480
+ r"""
481
+ Postprocessing retrieved `docs` and combining them with `input_strings`.
482
+
483
+ Args:
484
+ docs (`dict`):
485
+ Retrieved documents.
486
+ input_strings (`str`):
487
+ Input strings decoded by `preprocess_query`.
488
+ prefix (`str`):
489
+ Prefix added at the beginning of each input, typically used with T5-based models.
490
+
491
+ Return:
492
+ `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible
493
+ `attention_mask`.
494
+ """
495
+
496
+ def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
497
+ # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation
498
+ # TODO(piktus): better handling of truncation
499
+ if doc_title.startswith('"'):
500
+ doc_title = doc_title[1:]
501
+ if doc_title.endswith('"'):
502
+ doc_title = doc_title[:-1]
503
+ if prefix is None:
504
+ prefix = ""
505
+ out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace(
506
+ " ", " "
507
+ )
508
+ return out
509
+
510
+ rag_input_strings = [
511
+ cat_input_and_doc(
512
+ docs[i]["title"][j],
513
+ docs[i]["text"][j],
514
+ input_strings[i],
515
+ prefix,
516
+ )
517
+ for i in range(len(docs))
518
+ for j in range(n_docs)
519
+ ]
520
+
521
+ contextualized_inputs = self.generator_tokenizer.batch_encode_plus(
522
+ rag_input_strings,
523
+ max_length=self.config.max_combined_length,
524
+ return_tensors=return_tensors,
525
+ padding="max_length",
526
+ truncation=True,
527
+ )
528
+
529
+ return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"]
530
+
531
+ def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]:
532
+ return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]
533
+
534
+ def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]:
535
+ question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
536
+ ids_batched = []
537
+ vectors_batched = []
538
+ for question_hidden_states in question_hidden_states_batched:
539
+ start_time = time.time()
540
+ ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs)
541
+ logger.debug(
542
+ f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}"
543
+ )
544
+ ids_batched.extend(ids)
545
+ vectors_batched.extend(vectors)
546
+ return (
547
+ np.array(ids_batched),
548
+ np.array(vectors_batched),
549
+ ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
550
+
551
+ def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
552
+ """
553
+ Retrieves documents for specified `question_hidden_states`.
554
+
555
+ Args:
556
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
557
+ A batch of query vectors to retrieve with.
558
+ n_docs (`int`):
559
+ The number of docs retrieved per query.
560
+
561
+ Return:
562
+ `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects:
563
+
564
+ - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings
565
+ of the retrieved docs per query.
566
+ - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index
567
+ - **doc_dicts** (`List[dict]`): The `retrieved_doc_embeds` examples per query.
568
+ """
569
+
570
+ doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
571
+ return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
572
+
573
+ def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
574
+ # used in end2end retriever training
575
+ self.ctx_encoder_tokenizer = ctx_encoder_tokenizer
576
+ self.return_tokenized_docs = True
577
+
578
+ def __call__(
579
+ self,
580
+ question_input_ids: List[List[int]],
581
+ question_hidden_states: np.ndarray,
582
+ prefix=None,
583
+ n_docs=None,
584
+ return_tensors=None,
585
+ ) -> BatchEncoding:
586
+ """
587
+ Retrieves documents for specified `question_hidden_states`.
588
+
589
+ Args:
590
+ question_input_ids (`List[List[int]]`) batch of input ids
591
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`:
592
+ A batch of query vectors to retrieve with.
593
+ prefix (`str`, *optional*):
594
+ The prefix used by the generator's tokenizer.
595
+ n_docs (`int`, *optional*):
596
+ The number of docs retrieved per query.
597
+ return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"):
598
+ If set, will return tensors instead of list of python integers. Acceptable values are:
599
+
600
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
601
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
602
+ - `'np'`: Return Numpy `np.ndarray` objects.
603
+
604
+ Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
605
+
606
+ - **context_input_ids** -- List of token ids to be fed to a model.
607
+
608
+ [What are input IDs?](../glossary#input-ids)
609
+
610
+ - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model
611
+ (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
612
+
613
+ [What are attention masks?](../glossary#attention-mask)
614
+
615
+ - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents
616
+ - **doc_ids** -- List of ids of the retrieved documents
617
+ """
618
+
619
+ n_docs = n_docs if n_docs is not None else self.n_docs
620
+ prefix = prefix if prefix is not None else self.config.generator.prefix
621
+ retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)
622
+
623
+ input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
624
+ context_input_ids, context_attention_mask = self.postprocess_docs(
625
+ docs, input_strings, prefix, n_docs, return_tensors=return_tensors
626
+ )
627
+
628
+ if self.return_tokenized_docs:
629
+ retrieved_doc_text = []
630
+ retrieved_doc_title = []
631
+
632
+ for b_idx in range(len(docs)):
633
+ for doc_idx in range(n_docs):
634
+ retrieved_doc_text.append(docs[b_idx]["text"][doc_idx])
635
+ retrieved_doc_title.append(docs[b_idx]["title"][doc_idx])
636
+
637
+ tokenized_docs = self.ctx_encoder_tokenizer(
638
+ retrieved_doc_title,
639
+ retrieved_doc_text,
640
+ truncation=True,
641
+ padding="longest",
642
+ return_tensors=return_tensors,
643
+ )
644
+
645
+ return BatchEncoding(
646
+ {
647
+ "context_input_ids": context_input_ids,
648
+ "context_attention_mask": context_attention_mask,
649
+ "retrieved_doc_embeds": retrieved_doc_embeds,
650
+ "doc_ids": doc_ids,
651
+ "tokenized_doc_ids": tokenized_docs["input_ids"],
652
+ "tokenized_doc_attention_mask": tokenized_docs["attention_mask"],
653
+ },
654
+ tensor_type=return_tensors,
655
+ )
656
+
657
+ else:
658
+ return BatchEncoding(
659
+ {
660
+ "context_input_ids": context_input_ids,
661
+ "context_attention_mask": context_attention_mask,
662
+ "retrieved_doc_embeds": retrieved_doc_embeds,
663
+ "doc_ids": doc_ids,
664
+ },
665
+ tensor_type=return_tensors,
666
+ )
llava_next/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_original_to_hf_format.cpython-310.pyc ADDED
Binary file (4.99 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/sew/__init__.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
20
+
21
+ try:
22
+ if not is_torch_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["modeling_sew"] = [
28
+ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
29
+ "SEWForCTC",
30
+ "SEWForSequenceClassification",
31
+ "SEWModel",
32
+ "SEWPreTrainedModel",
33
+ ]
34
+
35
+ if TYPE_CHECKING:
36
+ from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ from .modeling_sew import (
45
+ SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
46
+ SEWForCTC,
47
+ SEWForSequenceClassification,
48
+ SEWModel,
49
+ SEWPreTrainedModel,
50
+ )
51
+
52
+
53
+ else:
54
+ import sys
55
+
56
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (903 Bytes). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/configuration_sew.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/convert_sew_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (8 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/modeling_sew.cpython-310.pyc ADDED
Binary file (33.8 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/sew/configuration_sew.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ SEW model configuration"""
16
+
17
+ import functools
18
+ import operator
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27
+ "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
28
+ # See all SEW models at https://huggingface.co/models?filter=sew
29
+ }
30
+
31
+
32
+ class SEWConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`SEWModel`]. It is used to instantiate a SEW model
35
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
36
+ defaults will yield a similar configuration to that of the SEW
37
+ [asapp/sew-tiny-100k](https://huggingface.co/asapp/sew-tiny-100k) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ vocab_size (`int`, *optional*, defaults to 32):
45
+ Vocabulary size of the SEW model. Defines the number of different tokens that can be represented by the
46
+ `inputs_ids` passed when calling [`SEW`].
47
+ hidden_size (`int`, *optional*, defaults to 768):
48
+ Dimensionality of the encoder layers and the pooler layer.
49
+ num_hidden_layers (`int`, *optional*, defaults to 12):
50
+ Number of hidden layers in the Transformer encoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ intermediate_size (`int`, *optional*, defaults to 3072):
54
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
55
+ squeeze_factor (`int`, *optional*, defaults to 2):
56
+ Sequence length downsampling factor after the encoder and upsampling factor after the transformer.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
58
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
59
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
60
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ activation_dropout (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for activations inside the fully connected layer.
64
+ attention_dropout (`float`, *optional*, defaults to 0.1):
65
+ The dropout ratio for the attention probabilities.
66
+ final_dropout (`float`, *optional*, defaults to 0.1):
67
+ The dropout probability for the final projection layer of [`SEWForCTC`].
68
+ layerdrop (`float`, *optional*, defaults to 0.1):
69
+ The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more
70
+ details.
71
+ initializer_range (`float`, *optional*, defaults to 0.02):
72
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
73
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
74
+ The epsilon used by the layer normalization layers.
75
+ feat_extract_norm (`str`, *optional*, defaults to `"group"`):
76
+ The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
77
+ normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
78
+ convolutional layers.
79
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
80
+ The dropout probability for output of the feature encoder.
81
+ feat_extract_activation (`str, `optional`, defaults to `"gelu"`):
82
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
83
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
84
+ conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)`):
85
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
86
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
87
+ conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)`):
88
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
89
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
90
+ conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)`):
91
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
92
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
93
+ *conv_dim*.
94
+ conv_bias (`bool`, *optional*, defaults to `False`):
95
+ Whether the 1D convolutional layers have a bias.
96
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
97
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
98
+ embeddings layer.
99
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
100
+ Number of groups of 1D convolutional positional embeddings layer.
101
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
102
+ Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
103
+ [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
104
+ Recognition](https://arxiv.org/abs/1904.08779).
105
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
106
+ Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
107
+ procecure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
108
+ reasoning from the propability of each feature vector to be chosen as the start of the vector span to be
109
+ masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
110
+ actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
111
+ mask_time_length (`int`, *optional*, defaults to 10):
112
+ Length of vector span along the time axis.
113
+ mask_time_min_masks (`int`, *optional*, defaults to 2),:
114
+ The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,
115
+ irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
116
+ mask_time_min_masks''
117
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
118
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
119
+ masking procecure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over
120
+ the axis. If reasoning from the propability of each feature vector to be chosen as the start of the vector
121
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
122
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
123
+ True`.
124
+ mask_feature_length (`int`, *optional*, defaults to 10):
125
+ Length of vector span along the feature axis.
126
+ mask_feature_min_masks (`int`, *optional*, defaults to 0),:
127
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
128
+ step, irrespectively of `mask_feature_prob`. Only relevant if
129
+ ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
130
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
131
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
132
+ instance of [`SEWForCTC`].
133
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
134
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
135
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
136
+ of [`SEWForCTC`].
137
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
138
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
139
+ instance of [`Wav2Vec2ForSequenceClassification`].
140
+ classifier_proj_size (`int`, *optional*, defaults to 256):
141
+ Dimensionality of the projection before token mean-pooling for classification.
142
+
143
+ Example:
144
+
145
+ ```python
146
+ >>> from transformers import SEWConfig, SEWModel
147
+
148
+ >>> # Initializing a SEW asapp/sew-tiny-100k style configuration
149
+ >>> configuration = SEWConfig()
150
+
151
+ >>> # Initializing a model (with random weights) from the asapp/sew-tiny-100k style configuration
152
+ >>> model = SEWModel(configuration)
153
+
154
+ >>> # Accessing the model configuration
155
+ >>> configuration = model.config
156
+ ```"""
157
+
158
+ model_type = "sew"
159
+
160
+ def __init__(
161
+ self,
162
+ vocab_size=32,
163
+ hidden_size=768,
164
+ num_hidden_layers=12,
165
+ num_attention_heads=12,
166
+ intermediate_size=3072,
167
+ squeeze_factor=2,
168
+ hidden_act="gelu",
169
+ hidden_dropout=0.1,
170
+ activation_dropout=0.1,
171
+ attention_dropout=0.1,
172
+ feat_proj_dropout=0.0,
173
+ final_dropout=0.1,
174
+ layerdrop=0.1,
175
+ initializer_range=0.02,
176
+ layer_norm_eps=1e-5,
177
+ feat_extract_norm="group",
178
+ feat_extract_activation="gelu",
179
+ conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
180
+ conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
181
+ conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
182
+ conv_bias=False,
183
+ num_conv_pos_embeddings=128,
184
+ num_conv_pos_embedding_groups=16,
185
+ apply_spec_augment=True,
186
+ mask_time_prob=0.05,
187
+ mask_time_length=10,
188
+ mask_time_min_masks=2,
189
+ mask_feature_prob=0.0,
190
+ mask_feature_length=10,
191
+ mask_feature_min_masks=0,
192
+ ctc_loss_reduction="mean",
193
+ ctc_zero_infinity=False,
194
+ use_weighted_layer_sum=False,
195
+ classifier_proj_size=256,
196
+ pad_token_id=0,
197
+ bos_token_id=1,
198
+ eos_token_id=2,
199
+ **kwargs,
200
+ ):
201
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
202
+ self.hidden_size = hidden_size
203
+ self.feat_extract_norm = feat_extract_norm
204
+ self.feat_extract_activation = feat_extract_activation
205
+ self.conv_dim = list(conv_dim)
206
+ self.conv_stride = list(conv_stride)
207
+ self.conv_kernel = list(conv_kernel)
208
+ self.conv_bias = conv_bias
209
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
210
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
211
+ self.num_feat_extract_layers = len(self.conv_dim)
212
+ self.num_hidden_layers = num_hidden_layers
213
+ self.intermediate_size = intermediate_size
214
+ self.squeeze_factor = squeeze_factor
215
+ self.hidden_act = hidden_act
216
+ self.num_attention_heads = num_attention_heads
217
+ self.hidden_dropout = hidden_dropout
218
+ self.attention_dropout = attention_dropout
219
+ self.activation_dropout = activation_dropout
220
+ self.feat_proj_dropout = feat_proj_dropout
221
+ self.final_dropout = final_dropout
222
+ self.layerdrop = layerdrop
223
+ self.layer_norm_eps = layer_norm_eps
224
+ self.initializer_range = initializer_range
225
+ self.vocab_size = vocab_size
226
+
227
+ if (
228
+ (len(self.conv_stride) != self.num_feat_extract_layers)
229
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
230
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
231
+ ):
232
+ raise ValueError(
233
+ "Configuration for convolutional layers is incorrect. "
234
+ "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
235
+ f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
236
+ f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
237
+ )
238
+
239
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
240
+ self.apply_spec_augment = apply_spec_augment
241
+ self.mask_time_prob = mask_time_prob
242
+ self.mask_time_length = mask_time_length
243
+ self.mask_time_min_masks = mask_time_min_masks
244
+ self.mask_feature_prob = mask_feature_prob
245
+ self.mask_feature_length = mask_feature_length
246
+ self.mask_feature_min_masks = mask_feature_min_masks
247
+
248
+ # ctc loss
249
+ self.ctc_loss_reduction = ctc_loss_reduction
250
+ self.ctc_zero_infinity = ctc_zero_infinity
251
+
252
+ # sequence classification
253
+ self.use_weighted_layer_sum = use_weighted_layer_sum
254
+ self.classifier_proj_size = classifier_proj_size
255
+
256
+ @property
257
+ def inputs_to_logits_ratio(self):
258
+ return functools.reduce(operator.mul, self.conv_stride, 1)
llava_next/lib/python3.10/site-packages/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert SEW checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ import os
21
+
22
+ import fairseq
23
+ import torch
24
+ from fairseq.data import Dictionary
25
+
26
+ # Register SEW's fairseq modules
27
+ from sew_asapp import tasks # noqa: F401
28
+
29
+ from transformers import (
30
+ SEWConfig,
31
+ SEWForCTC,
32
+ SEWModel,
33
+ Wav2Vec2CTCTokenizer,
34
+ Wav2Vec2FeatureExtractor,
35
+ Wav2Vec2Processor,
36
+ logging,
37
+ )
38
+
39
+
40
+ logging.set_verbosity_info()
41
+ logger = logging.get_logger(__name__)
42
+
43
# Maps fairseq parameter-name fragments (keys) to HF SEW module paths (values).
# Keys are matched as substrings against fairseq state_dict names in
# `recursively_load_weights`; a "*" in a value is later replaced by the
# encoder layer index extracted from the fairseq name.
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
60
+
61
+
62
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`.

    Args:
        hf_pointer: root HF module to write into.
        key: dotted attribute path (e.g. "encoder.layers.0.attention.k_proj").
        value: source tensor from the fairseq state dict.
        full_name: original fairseq parameter name (used for logging only).
        weight_type: one of "weight", "weight_g", "weight_v", "bias", or None to
            assign directly to the resolved pointer.

    Raises:
        ValueError: if destination and source tensor shapes do not match.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Human-readable destination name for messages. Previously the message
    # collapsed to an empty string when `weight_type` was None, hiding the key.
    hf_param_name = f"{key}.{weight_type}" if weight_type is not None else key

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    # Explicit raise instead of `assert` so the check survives `python -O`.
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {hf_param_name} is {hf_shape}, but should be {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{hf_param_name} was initialized from {full_name}.")
88
+
89
+
90
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every parameter of a fairseq SEW model into the HF model.

    Conv feature-extractor parameters are dispatched to `load_conv_layer`;
    everything else is matched against the name fragments in `MAPPING` and
    written with `set_recursively`. Unmatched parameters are collected and
    reported with a warning at the end.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # Fine-tuned checkpoints wrap the base model in a CTC head, so the feature
    # extractor lives one attribute level deeper.
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # In the fine-tuned model every mapped module except the LM head
                # sits under the `sew.` prefix.
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    # weight_g/weight_v (weight-norm components) must be checked
                    # before the plain "weight" substring.
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
132
+
133
+
134
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor parameter into the HF extractor.

    Args:
        full_name: full fairseq parameter name containing "conv_layers.<layer>.<type>".
        value: tensor to copy.
        feature_extractor: HF feature encoder whose `conv_layers` are written.
        unused_weights: list collecting names that could not be matched.
        use_group_norm: True when only layer 0 carries a (group) norm.

    Raises:
        ValueError: if destination and source tensor shapes do not match.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    # type_id 0 -> conv weight/bias, type_id 2 -> normalization weight/bias
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Fixed: the original error message indexed `feature_extractor[layer_id]`
            # directly, which would itself raise a TypeError while formatting.
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # Fixed: this branch copies the bias, but previously logged "weight".
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
172
+
173
+
174
def convert_config(model, is_finetuned):
    """Build a `SEWConfig` from a loaded fairseq model's configuration.

    Args:
        model: the loaded fairseq model (CTC-wrapped when `is_finetuned`).
        is_finetuned: whether the checkpoint carries a fine-tuned CTC head
            whose settings override some of the base encoder settings.

    Returns:
        A populated `SEWConfig`.
    """
    import ast

    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    # `conv_feature_layers` is a string literal like "[(512, 10, 5), ...]".
    # `ast.literal_eval` parses exactly such literals and, unlike `eval`,
    # cannot execute arbitrary code from a tampered checkpoint.
    conv_layers = ast.literal_eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
221
+
222
+
223
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.

    Loads the fairseq checkpoint, derives or loads a `SEWConfig`, builds the
    matching processor/feature extractor, copies the weights with
    `recursively_load_weights`, and saves everything to
    `pytorch_dump_folder_path`.
    """

    if is_finetuned:
        # The fine-tuned fairseq task needs the data directory (parent of the
        # dict file) to resolve its target dictionary.
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    # Layer-norm extractors were trained with attention masks; group-norm ones were not.
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
292
+
293
+
294
# CLI entry point: convert a fairseq SEW checkpoint into HF Transformers format.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
+ )
llava_next/lib/python3.10/site-packages/transformers/models/sew/modeling_sew.py ADDED
@@ -0,0 +1,1230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 ASAPP Inc. and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch SEW model."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
29
+ from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
32
+ from .configuration_sew import SEWConfig
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+
38
+ _HIDDEN_STATES_START_POSITION = 1
39
+
40
+ # General docstring
41
+ _CONFIG_FOR_DOC = "SEWConfig"
42
+
43
+ # Base docstring
44
+ _CHECKPOINT_FOR_DOC = "asapp/sew-tiny-100k-ft-ls100h"
45
+ _EXPECTED_OUTPUT_SHAPE = [1, 292, 512]
46
+
47
+ # CTC docstring
48
+ _CTC_EXPECTED_OUTPUT = (
49
+ "'MISTER QUILTER IS THE APPOSTILE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPOLLE'"
50
+ )
51
+ _CTC_EXPECTED_LOSS = 0.42
52
+
53
+ # Audio class docstring
54
+ _SEQ_CLASS_CHECKPOINT = "anton-l/sew-mid-100k-ft-keyword-spotting"
55
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'"
56
+ _SEQ_CLASS_EXPECTED_LOSS = 9.52
57
+
58
+ SEW_PRETRAINED_MODEL_ARCHIVE_LIST = [
59
+ "asapp/sew-tiny-100k",
60
+ "asapp/sew-small-100k",
61
+ "asapp/sew-mid-100k",
62
+ # See all SEW models at https://huggingface.co/models?filter=sew
63
+ ]
64
+
65
+
66
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: Optional[torch.LongTensor] = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
            the first element is the batch size and the second element is the length of the axis to span.
        mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
            independently generated mask spans of length `mask_length` is computed by
            `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
            actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
            each batch dimension.
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}`"
        )

    # epsilon is used for probabilistic rounding
    # (one draw per call, shared by every sequence in the batch)
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch
    # (per-sequence real lengths when an attention mask is given)
    input_lengths = (
        attention_mask.sum(-1).detach().tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick first sampled index that will serve as a dummy index to pad vector
        # to ensure same dimension for all batches due to probabilistic rounding
        # Picking first sample just pads those vectors twice.
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller then
            # `sequence_length` in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask
184
+
185
+
186
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->SEW
class SEWNoLayerNormConvLayer(nn.Module):
    """Conv1d + activation block of the feature encoder, without normalization."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes the raw single-channel waveform.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # hidden_states: channels-first (batch, channels, time) for Conv1d.
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
206
+
207
+
208
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->SEW
class SEWLayerNormConvLayer(nn.Module):
    """Conv1d + LayerNorm + activation block of the feature encoder."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes the raw single-channel waveform.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)

        # LayerNorm normalizes the last dimension, so move channels last and back.
        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)

        hidden_states = self.activation(hidden_states)
        return hidden_states
234
+
235
+
236
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->SEW
class SEWGroupNormConvLayer(nn.Module):
    """Conv1d + GroupNorm + activation block (used as layer 0 in "group" mode)."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # Layer 0 consumes the raw single-channel waveform.
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

        # num_groups == num_channels -> one group per channel (per-channel norm).
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
259
+
260
+
261
class SEWPositionalConvEmbedding(nn.Module):
    """Convolutional positional embedding that also downsamples by `squeeze_factor`."""

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
            stride=config.squeeze_factor,
        )

        if is_deepspeed_zero3_enabled():
            import deepspeed

            # Under ZeRO-3 the conv weight is partitioned across ranks; gather it
            # before weight_norm splits it into weight_g / weight_v, then register
            # the new parameters so deepspeed keeps tracking them.
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
        else:
            self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)

        self.padding = SEWSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # hidden_states: channels-first (batch, hidden_size, time) for Conv1d.
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        return hidden_states
292
+
293
+
294
class SEWSamePadLayer(nn.Module):
    """Trims the surplus trailing frame produced by an even-kernel "same"-padded conv.

    A Conv1d with kernel size `k` and padding `k // 2` emits one extra output
    frame when `k` is even; this layer removes it so lengths line up.
    """

    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        # Even kernel sizes produce exactly one surplus frame; odd ones none.
        self.num_pad_remove = 0 if num_conv_pos_embeddings % 2 else 1

    def forward(self, hidden_states):
        if not self.num_pad_remove:
            return hidden_states
        return hidden_states[:, :, : -self.num_pad_remove]
304
+
305
+
306
class SEWUpsampling(nn.Module):
    """Projects encoder outputs and unfolds channels back into time steps.

    Inverse of the squeeze step: each frame's hidden vector is widened by
    `squeeze_factor` and redistributed along the sequence axis, so a
    (batch, seq, dim) input becomes (batch, seq * factor, dim) after the
    projection to `dim * factor` and the reshape.
    """

    def __init__(self, config):
        super().__init__()
        self.projection = nn.Linear(config.hidden_size, config.hidden_size * config.squeeze_factor)
        self.activation = ACT2FN[config.feat_extract_activation]
        self.squeeze_factor = config.squeeze_factor

    def forward(self, hidden_states):
        hidden_states = self.activation(self.projection(hidden_states))

        factor = self.squeeze_factor
        if factor > 1:
            # Fold the widened channel dimension into the time dimension:
            # (batch, seq, dim) -> (batch, seq * factor, dim // factor)
            batch_size, seq_len, embed_dim = hidden_states.size()
            hidden_states = hidden_states.reshape(batch_size, seq_len * factor, embed_dim // factor)

        return hidden_states
326
+
327
+
328
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->SEW
class SEWFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()

        # "group" mode normalizes only layer 0 (group norm); "layer" mode
        # applies layer norm in every conv block.
        if config.feat_extract_norm == "group":
            conv_layers = [SEWGroupNormConvLayer(config, layer_id=0)] + [
                SEWNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
            ]
        elif config.feat_extract_norm == "layer":
            conv_layers = [SEWLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        self._requires_grad = True

    def _freeze_parameters(self):
        # Freeze the entire extractor (e.g. when fine-tuning only the encoder).
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        # input_values: raw waveform (batch, time); add a channel dim for Conv1d.
        hidden_states = input_values[:, None]

        # make sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        for conv_layer in self.conv_layers:
            if self._requires_grad and self.gradient_checkpointing and self.training:
                # NOTE(review): `_gradient_checkpointing_func` is not defined in this
                # class — presumably injected by the enclosing PreTrainedModel when
                # gradient checkpointing is enabled; confirm before standalone use.
                hidden_states = self._gradient_checkpointing_func(
                    conv_layer.__call__,
                    hidden_states,
                )
            else:
                hidden_states = conv_layer(hidden_states)

        return hidden_states
371
+
372
+
373
class SEWFeatureExtractor(SEWFeatureEncoder):
    """Deprecated alias of `SEWFeatureEncoder`, kept for backward compatibility."""

    def __init__(self, config):
        super().__init__(config)
        # Fixed typo in the user-facing message: "depreciated" -> "deprecated".
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )
382
+
383
+
384
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->SEW
class SEWAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[SEWConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        # Heads must evenly partition the embedding dimension.
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Standard 1/sqrt(head_dim) scaling, pre-applied to the queries in forward().
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim), contiguous for bmm.
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel

        Returns `(attn_output, attn_weights_or_None, past_key_value_or_None)`.
        `attn_weights` is only materialized when `output_attentions=True`;
        `past_key_value` is only returned when `self.is_decoder` is True.
        """

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            # Append the new timestep(s) to the cached keys/values along the time axis.
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # Flatten batch and head dims so attention is a single batched matmul.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # Additive mask (expected to hold large negative values at masked positions).
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            # Per-head multiplicative mask (1 keeps a head, 0 zeroes it out).
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
541
+
542
+
543
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->SEW
class SEWFeedForward(nn.Module):
    """Position-wise feed-forward block: expand -> activate -> dropout -> contract -> dropout."""

    def __init__(self, config):
        super().__init__()
        # Submodules are registered in the same order as the reference implementation
        # so parameter initialization and state-dict layout are unchanged.
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be either the name of an activation or a callable.
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        # Expansion projection with non-linearity and dropout.
        expanded = self.intermediate_dropout(self.intermediate_act_fn(self.intermediate_dense(hidden_states)))
        # Contraction back to the model width.
        return self.output_dropout(self.output_dense(expanded))
566
+
567
+
568
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->SEW
class SEWEncoderLayer(nn.Module):
    """Post-norm transformer encoder layer: self-attention and feed-forward, each wrapped in a residual."""

    def __init__(self, config):
        super().__init__()
        self.attention = SEWAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = SEWFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        # Residual connection around the attention sub-block, then post-norm.
        hidden_states = self.layer_norm(residual + self.dropout(hidden_states))
        # Residual connection around the feed-forward sub-block, then the final norm.
        hidden_states = self.final_layer_norm(hidden_states + self.feed_forward(hidden_states))

        # Attention weights are appended only when explicitly requested.
        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
601
+
602
+
603
class SEWEncoder(nn.Module):
    """Transformer encoder with SEW's squeeze/upsample scheme.

    The time axis is first downsampled by `config.squeeze_factor` via average pooling
    (with convolutional positional embeddings added), run through the transformer
    layers, and finally upsampled back to (approximately) the input resolution.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = SEWPositionalConvEmbedding(config)
        # Non-overlapping average pooling implements the time-axis "squeeze".
        self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([SEWEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.upsample = SEWUpsampling(config)
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run the squeezed transformer stack.

        Args:
            hidden_states: `(batch, time, hidden_size)` features from the feature projection.
            attention_mask: optional `(batch, time)` boolean/int mask at *input* resolution;
                it is re-derived at the pooled resolution before being applied.

        Returns:
            `BaseModelOutput` (or a tuple when `return_dict=False`) with the upsampled
            last hidden state and, optionally, all hidden states / attentions.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            hidden_states[~attention_mask] = 0.0

            input_lengths = (attention_mask.long()).sum(-1)
            # apply pooling formula to get real output_lengths
            output_lengths = input_lengths // self.config.squeeze_factor
            max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor
            # Rebuild a boolean mask at the pooled (squeezed) time resolution.
            attention_ids = (
                torch.arange(0, max_encoder_length, device=output_lengths.device)
                .view(1, -1)
                .expand(output_lengths.shape[0], -1)
            )
            attention_mask = (attention_ids < output_lengths.view(-1, 1)).long()

            # extend attention_mask
            # Convert to an additive mask: 0 where attended, large-negative where masked.
            attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
            attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )

        # Remember the input length so the output can be padded back after upsampling.
        n_input_timesteps = hidden_states.shape[1]

        hidden_states = hidden_states.transpose(1, 2)
        position_embeddings = self.pos_conv_embed(hidden_states)
        pooled_hidden_states = self.pool(hidden_states)
        # The conv positional embedding and the pooled features can differ by one frame;
        # truncate both to the shorter length before summing.
        min_length = min(position_embeddings.size(-1), pooled_hidden_states.size(-1))
        hidden_states = pooled_hidden_states[..., :min_length] + position_embeddings[..., :min_length]
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    hidden_states = hidden_states  # no-op kept out; checkpointing path below
                    layer_outputs = self._gradient_checkpointing_func(
                        layer.__call__,
                        hidden_states,
                        attention_mask,
                        output_attentions,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                # A skipped layer still contributes `None` attentions to keep indices aligned.
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        # Upsample back toward the input time resolution; pad with zeros if the
        # squeeze/upsample round trip came up short.
        hidden_states = self.upsample(hidden_states)
        if hidden_states.shape[1] < n_input_timesteps:
            hidden_states = nn.functional.pad(hidden_states, (0, 0, 0, n_input_timesteps - hidden_states.shape[1]))

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
705
+
706
+
707
class SEWPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SEWConfig
    base_model_prefix = "sew"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, SEWPositionalConvEmbedding):
            # Init matching the fairseq positional conv: std scales with fan-in.
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            if is_deepspeed_zero3_enabled():
                import deepspeed

                # Under ZeRO-3 the weight is partitioned; gather it before initializing.
                # weight_v/weight_g are present when weight-norm is applied to the conv.
                if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
                    with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
                        nn.init.kaiming_normal_(module.weight.data)
                else:
                    with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
                        nn.init.kaiming_normal_(module.weight.data)
            else:
                nn.init.kaiming_normal_(module.weight.data)

        # Biases of Linear/Conv1d are zeroed regardless of which branch above ran.
        if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
            module.bias.data.zero_()

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        # Fold the length formula over every conv layer of the feature encoder.
        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        # Derive a boolean mask at feature resolution from the sample-level attention mask.
        output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations makes sure that all values before the output lengths idxs are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
776
+
777
+
778
# Docstring fragments consumed by the `@add_start_docstrings(_to_model_forward)`
# decorators on the model classes below. These are runtime strings, not comments.
SEW_START_DOCSTRING = r"""
    SEW was proposed in [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech
    Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger,
    Yoav Artzi.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving etc.).

    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`SEWConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


SEW_INPUTS_DOCSTRING = r"""
    Args:
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
            soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
            conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
            1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
822
+
823
+
824
@add_start_docstrings(
    "The bare SEW Model transformer outputting raw hidden-states without any specific head on top.",
    SEW_START_DOCSTRING,
)
class SEWModel(SEWPreTrainedModel):
    """Base SEW model: conv feature encoder -> optional projection -> SpecAugment masking -> encoder."""

    def __init__(self, config: SEWConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = SEWFeatureEncoder(config)
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)

        # Only project when conv output width differs from the transformer width.
        self.project_features = config.conv_dim[-1] != config.hidden_size
        if self.project_features:
            self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.feature_dropout = nn.Dropout(config.feat_proj_dropout)

        # Learned embedding substituted at SpecAugment-masked time steps.
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())

        self.encoder = SEWEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).
        """

        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states

        # generate indices & apply SpecAugment along time axis
        batch_size, sequence_length, hidden_size = hidden_states.size()

        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            # Time masking is sampled only during training.
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)

        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            # Broadcast the per-feature mask across every time step.
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0

        return hidden_states

    @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Raw waveform -> conv features, then move channels last and normalize.
        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)
        extract_features = self.layer_norm(extract_features)

        if self.project_features:
            extract_features = self.feature_projection(extract_features)
        hidden_states = self.feature_dropout(extract_features)

        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)

        # NOTE(review): `attention_mask` is not forwarded to `_mask_hidden_states` here,
        # so sampled time masks ignore padding — confirm this is intentional.
        hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = encoder_outputs[0]

        if not return_dict:
            return (hidden_states,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
950
+
951
+
952
@add_start_docstrings(
    """SEW Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    SEW_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->SEW, wav2vec2->sew, WAV_2_VEC_2->SEW
class SEWForCTC(SEWPreTrainedModel):
    def __init__(self, config, target_lang: Optional[str] = None):
        super().__init__(config)

        self.sew = SEWModel(config)
        self.dropout = nn.Dropout(config.final_dropout)

        # Optional adapter language; resolved in `tie_weights` after weights are loaded.
        self.target_lang = target_lang

        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that "
                "does not define the vocabulary size of the language model head. Please "
                "instantiate the model as follows: `SEWForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
                "or define `vocab_size` of your model's configuration."
            )
        # Head input width depends on whether an adapter changes the hidden size.
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def tie_weights(self):
        """
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        """

        # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
        # correctly load adapter layers for SEW so that we do not have to introduce a new API to
        # [`PreTrainedModel`]. While slightly hacky, SEW never has to tie input and output embeddings, so that it is
        # ok to repurpose this function here.
        target_lang = self.target_lang

        if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
            raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
        elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
            logger.info("By default `target_lang` is set to 'eng'.")
        elif target_lang is not None:
            self.load_adapter(target_lang, force_load=True)

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        # Deprecated alias for `freeze_feature_encoder`.
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.sew.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.sew.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CTC_EXPECTED_OUTPUT,
        expected_loss=_CTC_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.sew(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            if labels.max() >= self.config.vocab_size:
                raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")

            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)

            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)

            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

            # cuDNN's CTC kernel is disabled for deterministic/stable loss computation.
            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
1107
+
1108
+
1109
@add_start_docstrings(
    """
    SEW Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB
    Keyword Spotting.
    """,
    SEW_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->SEW, wav2vec2->sew, WAV_2_VEC_2->SEW
class SEWForSequenceClassification(SEWPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of SEW adapters (config.add_adapter=True)"
            )
        self.sew = SEWModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        # Optional learned softmax-weighted sum over all layer outputs.
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.
        """
        # Deprecated alias for `freeze_feature_encoder`.
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.sew.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.sew.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_SEQ_CLASS_CHECKPOINT,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Weighted layer sum needs every layer's hidden state regardless of caller preference.
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.sew(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            # No padding info: plain mean over time.
            pooled_output = hidden_states.mean(dim=1)
        else:
            # Mask padded frames and average only over real frames.
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            hidden_states[~padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+ from ..auto.configuration_auto import AutoConfig
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class SpeechEncoderDecoderConfig(PretrainedConfig):
27
+ r"""
28
+ [`SpeechEncoderDecoderConfig`] is the configuration class to store the configuration of a
29
+ [`SpeechEncoderDecoderModel`]. It is used to instantiate an Encoder Decoder model according to the specified
30
+ arguments, defining the encoder and decoder configs.
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+ Args:
36
+ kwargs (*optional*):
37
+ Dictionary of keyword arguments. Notably:
38
+
39
+ - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
40
+ the encoder config.
41
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
42
+ the decoder config.
43
+
44
+ Examples:
45
+
46
+ ```python
47
+ >>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel
48
+
49
+ >>> # Initializing a Wav2Vec2 & BERT style configuration
50
+ >>> config_encoder = Wav2Vec2Config()
51
+ >>> config_decoder = BertConfig()
52
+
53
+ >>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
54
+
55
+ >>> # Initializing a Wav2Vec2Bert model from a Wav2Vec2 & bert-base-uncased style configurations
56
+ >>> model = SpeechEncoderDecoderModel(config=config)
57
+
58
+ >>> # Accessing the model configuration
59
+ >>> config_encoder = model.config.encoder
60
+ >>> config_decoder = model.config.decoder
61
+ >>> # set decoder config to causal lm
62
+ >>> config_decoder.is_decoder = True
63
+ >>> config_decoder.add_cross_attention = True
64
+
65
+ >>> # Saving the model, including its configuration
66
+ >>> model.save_pretrained("my-model")
67
+
68
+ >>> # loading model and config from pretrained folder
69
+ >>> encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained("my-model")
70
+ >>> model = SpeechEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
71
+ ```"""
72
+
73
+ model_type = "speech-encoder-decoder"
74
+ is_composition = True
75
+
76
+ def __init__(self, **kwargs):
77
+ super().__init__(**kwargs)
78
+ if "encoder" not in kwargs or "decoder" not in kwargs:
79
+ raise ValueError(
80
+ f"A configuraton of type {self.model_type} cannot be instantiated because not both `encoder` and"
81
+ f" `decoder` sub-configurations are passed, but only {kwargs}"
82
+ )
83
+
84
+ encoder_config = kwargs.pop("encoder")
85
+ encoder_model_type = encoder_config.pop("model_type")
86
+ decoder_config = kwargs.pop("decoder")
87
+ decoder_model_type = decoder_config.pop("model_type")
88
+
89
+ self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
90
+ self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
91
+ self.is_encoder_decoder = True
92
+
93
+ @classmethod
94
+ def from_encoder_decoder_configs(
95
+ cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
96
+ ) -> PretrainedConfig:
97
+ r"""
98
+ Instantiate a [`SpeechEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model
99
+ configuration and decoder model configuration.
100
+
101
+ Returns:
102
+ [`SpeechEncoderDecoderConfig`]: An instance of a configuration object
103
+ """
104
+ logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
105
+ decoder_config.is_decoder = True
106
+ decoder_config.add_cross_attention = True
107
+
108
+ return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py ADDED
@@ -0,0 +1,604 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Classes to support Speech-Encoder-Text-Decoder architectures"""
16
+
17
+
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ from torch import nn
22
+ from torch.nn import CrossEntropyLoss
23
+
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
28
+ from ..auto.configuration_auto import AutoConfig
29
+ from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
30
+ from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ _CONFIG_FOR_DOC = "SpeechEncoderDecoderConfig"
36
+
37
+ SPEECH_ENCODER_DECODER_START_DOCSTRING = r"""
38
+ This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech
39
+ autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is
40
+ loaded via [`~AutoModel.from_pretrained`] function and the decoder is loaded via
41
+ [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder
42
+ and should be fine-tuned on a downstream generative task, like summarization.
43
+
44
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
45
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
46
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
47
+ Zhou, Wei Li, Peter J. Liu.
48
+
49
+ Additionally, in [Large-Scale Self- and Semi-Supervised Learning for Speech
50
+ Translation](https://arxiv.org/abs/2104.06678) it is shown how leveraging large pretrained speech models for speech
51
+ translation yields a significant performance improvement.
52
+
53
+ After such an Speech-Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other
54
+ models (see the examples for more information).
55
+
56
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
57
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
58
+ etc.)
59
+
60
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
61
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
62
+ and behavior.
63
+
64
+ Parameters:
65
+ config ([`SpeechEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
66
+ Initializing with a config file does not load the weights associated with the model, only the
67
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
68
+ """
69
+
70
+ SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING = r"""
71
+ Args:
72
+ inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*):
73
+ Float values of input raw speech waveform or speech features. Values can be obtained by loading a `.flac`
74
+ or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile
75
+ library (`pip install soundfile`). To prepare the array into `inputs`, either the [`Wav2Vec2Processor`] or
76
+ [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type
77
+ `torch.FloatTensor`.
78
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
79
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
80
+
81
+ - 1 for tokens that are **not masked**,
82
+ - 0 for tokens that are **masked**.
83
+
84
+ [What are attention masks?](../glossary#attention-mask)
85
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
86
+ Indices of decoder input sequence tokens in the vocabulary.
87
+
88
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
89
+ [`PreTrainedTokenizer.__call__`] for details.
90
+
91
+ [What are input IDs?](../glossary#input-ids)
92
+
93
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
94
+ `past_key_values`).
95
+
96
+ For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
97
+ right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
98
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
99
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
100
+ be used by default.
101
+ encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
102
+ This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
103
+ `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
104
+ of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
105
+ decoder.
106
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
107
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
108
+
109
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
110
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
111
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
112
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
113
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
114
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
115
+ model's internal embedding lookup matrix.
116
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
117
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
118
+ representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
119
+ into associated vectors than the model's internal embedding lookup matrix.
120
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
121
+ Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
122
+ ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
123
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
124
+ use_cache (`bool`, *optional*):
125
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
126
+ `past_key_values`).
127
+ output_attentions (`bool`, *optional*):
128
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
129
+ tensors for more detail.
130
+ output_hidden_states (`bool`, *optional*):
131
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
132
+ more detail.
133
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
134
+ Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
135
+ into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
136
+ soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding
137
+ and conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.
138
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`, *optional*):
139
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
140
+ by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
141
+ via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
142
+ [`Speech2TextFeatureExtractor`] should be used for extracting the fbank features, padding and conversion
143
+ into a tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`]
144
+ return_dict (`bool`, *optional*):
145
+ If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
146
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
147
+
148
+ - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
149
+ - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
150
+ """
151
+
152
+
153
+ # Copied from transformers.models.encoder_decoder.modeling_encoder_decoder.shift_tokens_right
154
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
155
+ """
156
+ Shift input ids one token to the right.
157
+ """
158
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
159
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
160
+ if decoder_start_token_id is None:
161
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
162
+ shifted_input_ids[:, 0] = decoder_start_token_id
163
+
164
+ if pad_token_id is None:
165
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
166
+ # replace possible -100 values in labels by `pad_token_id`
167
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
168
+
169
+ return shifted_input_ids
170
+
171
+
172
+ @add_start_docstrings(SPEECH_ENCODER_DECODER_START_DOCSTRING)
173
+ class SpeechEncoderDecoderModel(PreTrainedModel):
174
+ r"""
175
+ [`SpeechEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with
176
+ one of the base model classes of the library as encoder and another one as decoder when created with the
177
+ :meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
178
+ :meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
179
+ """
180
+
181
+ config_class = SpeechEncoderDecoderConfig
182
+ base_model_prefix = "speech_encoder_decoder"
183
+ main_input_name = "inputs"
184
+ supports_gradient_checkpointing = True
185
+
186
+ def __init__(
187
+ self,
188
+ config: Optional[PretrainedConfig] = None,
189
+ encoder: Optional[PreTrainedModel] = None,
190
+ decoder: Optional[PreTrainedModel] = None,
191
+ ):
192
+ if config is None and (encoder is None or decoder is None):
193
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
194
+ if config is None:
195
+ config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
196
+ else:
197
+ if not isinstance(config, self.config_class):
198
+ raise ValueError(f"Config: {config} has to be of type {self.config_class}")
199
+
200
+ if config.decoder.cross_attention_hidden_size is not None:
201
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
202
+ raise ValueError(
203
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
204
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
205
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
206
+ " `config.encoder.hidden_size`."
207
+ )
208
+
209
+ # initialize with config
210
+ # make sure input & output embeddings is not tied
211
+ config.tie_word_embeddings = False
212
+ super().__init__(config)
213
+
214
+ if encoder is None:
215
+ encoder = AutoModel.from_config(config.encoder)
216
+
217
+ if decoder is None:
218
+ decoder = AutoModelForCausalLM.from_config(config.decoder)
219
+
220
+ self.encoder = encoder
221
+ self.decoder = decoder
222
+
223
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
224
+ logger.warning(
225
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
226
+ f" {self.config.encoder}"
227
+ )
228
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
229
+ logger.warning(
230
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
231
+ f" {self.config.decoder}"
232
+ )
233
+
234
+ # make sure that the individual model's config refers to the shared config
235
+ # so that the updates to the config will be synced
236
+ self.encoder.config = self.config.encoder
237
+ self.decoder.config = self.config.decoder
238
+
239
+ # get encoder output hidden size
240
+ self.encoder_output_dim = getattr(config.encoder, "output_hidden_size", config.encoder.hidden_size)
241
+ if (
242
+ self.encoder_output_dim != self.decoder.config.hidden_size
243
+ and self.decoder.config.cross_attention_hidden_size is None
244
+ ):
245
+ # encoder outputs might need to be projected to different dimension for decoder
246
+ self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
247
+
248
+ if self.encoder.get_output_embeddings() is not None:
249
+ raise ValueError(
250
+ f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
251
+ )
252
+
253
+ def get_encoder(self):
254
+ return self.encoder
255
+
256
+ def get_decoder(self):
257
+ return self.decoder
258
+
259
+ def get_output_embeddings(self):
260
+ return self.decoder.get_output_embeddings()
261
+
262
+ def set_output_embeddings(self, new_embeddings):
263
+ return self.decoder.set_output_embeddings(new_embeddings)
264
+
265
+ def freeze_feature_encoder(self):
266
+ """
267
+ Calling this function will disable the gradient computation for the feature encoder of the speech encoder so
268
+ that its parameters will not be updated during training.
269
+ """
270
+ self.encoder.freeze_feature_encoder()
271
+
272
+ @classmethod
273
+ def from_pretrained(cls, *args, **kwargs):
274
+ # At the moment fast initialization is not supported for composite models
275
+ if kwargs.get("_fast_init", False):
276
+ logger.warning(
277
+ "Fast initialization is currently not supported for SpeechEncoderDecoderModel. "
278
+ "Falling back to slow initialization..."
279
+ )
280
+ kwargs["_fast_init"] = False
281
+ return super().from_pretrained(*args, **kwargs)
282
+
283
+ @classmethod
284
+ def from_encoder_decoder_pretrained(
285
+ cls,
286
+ encoder_pretrained_model_name_or_path: str = None,
287
+ decoder_pretrained_model_name_or_path: str = None,
288
+ *model_args,
289
+ **kwargs,
290
+ ) -> PreTrainedModel:
291
+ r"""
292
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
293
+ checkpoints.
294
+
295
+
296
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
297
+ the model, you need to first set it back in training mode with `model.train()`.
298
+
299
+ Params:
300
+ encoder_pretrained_model_name_or_path (`str`, *optional*):
301
+ Information necessary to initiate the encoder. Can be either:
302
+
303
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
304
+ Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
305
+ user or organization name, like `dbmdz/bert-base-german-cased`.
306
+ - A path to a *directory* containing model weights saved using
307
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
308
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
309
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
310
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
311
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
312
+
313
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
314
+ Information necessary to initiate the decoder. Can be either:
315
+
316
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
317
+ Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
318
+ user or organization name, like `dbmdz/bert-base-german-cased`.
319
+ - A path to a *directory* containing model weights saved using
320
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
321
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
322
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
323
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
324
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
325
+
326
+ model_args (remaining positional arguments, *optional*):
327
+ All remaning positional arguments will be passed to the underlying model's `__init__` method.
328
+
329
+ kwargs (remaining dictionary of keyword arguments, *optional*):
330
+ Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
331
+ `output_attentions=True`).
332
+
333
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
334
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
335
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
336
+
337
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
338
+
339
+ Example:
340
+
341
+ ```python
342
+ >>> from transformers import SpeechEncoderDecoderModel
343
+
344
+ >>> # initialize a wav2vec2bert from a pretrained Wav2Vec2 and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized
345
+ >>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
346
+ ... "facebook/wav2vec2-base-960h", "bert-base-uncased"
347
+ ... )
348
+ >>> # saving model after fine-tuning
349
+ >>> model.save_pretrained("./wav2vec2bert")
350
+ >>> # load fine-tuned model
351
+ >>> model = SpeechEncoderDecoderModel.from_pretrained("./wav2vec2bert")
352
+ ```"""
353
+
354
+ kwargs_encoder = {
355
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
356
+ }
357
+
358
+ kwargs_decoder = {
359
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
360
+ }
361
+
362
+ # remove encoder, decoder kwargs from kwargs
363
+ for key in kwargs_encoder.keys():
364
+ del kwargs["encoder_" + key]
365
+ for key in kwargs_decoder.keys():
366
+ del kwargs["decoder_" + key]
367
+
368
+ # Load and initialize the encoder and decoder
369
+ # The distinction between encoder and decoder at the model level is made
370
+ # by the value of the flag `is_decoder` that we need to set correctly.
371
+ encoder = kwargs_encoder.pop("model", None)
372
+ if encoder is None:
373
+ if encoder_pretrained_model_name_or_path is None:
374
+ raise ValueError(
375
+ "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
376
+ "to be defined."
377
+ )
378
+
379
+ if "config" not in kwargs_encoder:
380
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
381
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
382
+ )
383
+
384
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
385
+ logger.info(
386
+ f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
387
+ "from a decoder model. Cross-attention and casual mask are disabled."
388
+ )
389
+ encoder_config.is_decoder = False
390
+ encoder_config.add_cross_attention = False
391
+
392
+ kwargs_encoder["config"] = encoder_config
393
+
394
+ encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
395
+
396
+ decoder = kwargs_decoder.pop("model", None)
397
+ if decoder is None:
398
+ if decoder_pretrained_model_name_or_path is None:
399
+ raise ValueError(
400
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
401
+ "to be defined."
402
+ )
403
+
404
+ if "config" not in kwargs_decoder:
405
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
406
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
407
+ )
408
+
409
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
410
+ logger.info(
411
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
412
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
413
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
414
+ )
415
+ decoder_config.is_decoder = True
416
+ decoder_config.add_cross_attention = True
417
+
418
+ kwargs_decoder["config"] = decoder_config
419
+
420
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
421
+ logger.warning(
422
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
423
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
424
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
425
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
426
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
427
+ )
428
+
429
+ decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
430
+
431
+ # instantiate config with corresponding kwargs
432
+ config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
433
+
434
+ # make sure input & output embeddings is not tied
435
+ config.tie_word_embeddings = False
436
+ return cls(encoder=encoder, decoder=decoder, config=config)
437
+
438
@add_start_docstrings_to_model_forward(SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
    self,
    inputs: Optional[torch.FloatTensor] = None,
    attention_mask: Optional[torch.FloatTensor] = None,
    decoder_input_ids: Optional[torch.LongTensor] = None,
    decoder_attention_mask: Optional[torch.BoolTensor] = None,
    encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
    decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    input_values: Optional[torch.FloatTensor] = None,
    input_features: Optional[torch.FloatTensor] = None,
    return_dict: Optional[bool] = None,
    **kwargs,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
    r"""
    Returns:

    Examples:

    ```python
    >>> from transformers import SpeechEncoderDecoderModel, AutoProcessor
    >>> from datasets import load_dataset
    >>> import torch

    >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
    >>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")

    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

    >>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values
    >>> # Inference: Translate English speech to German
    >>> generated = model.generate(input_values)
    >>> decoded = processor.batch_decode(generated, skip_special_tokens=True)[0]
    >>> decoded
    'Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.'

    >>> # Training: Train model on English transcription
    >>> labels = processor(text=ds[0]["text"], return_tensors="pt").input_ids

    >>> loss = model(input_values, labels=labels).loss
    >>> loss.backward()
    ```"""
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # Route free-form kwargs: names with a "decoder_" prefix go to the decoder
    # (prefix stripped), everything else goes to the encoder.
    kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}

    kwargs_decoder = {
        argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
    }

    if encoder_outputs is None:
        # Resolve the audio input: the caller may pass it as `inputs`,
        # `input_values`, or `input_features`, but only one of the latter two.
        if inputs is None:
            if input_values is not None and input_features is not None:
                raise ValueError("You cannot specify both input_values and input_features at the same time")
            elif input_values is not None:
                inputs = input_values
            elif input_features is not None:
                inputs = input_features
            else:
                raise ValueError("You have to specify either input_values or input_features")

        encoder_outputs = self.encoder(
            inputs,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs_encoder,
        )
    elif isinstance(encoder_outputs, tuple):
        # Normalize precomputed tuple outputs into a model-output object so
        # attribute access below (e.g. .hidden_states) works either way.
        encoder_outputs = BaseModelOutput(*encoder_outputs)

    encoder_hidden_states = encoder_outputs[0]

    # optionally project encoder_hidden_states
    # (only needed when the encoder/decoder widths differ and the decoder does
    # not perform its own cross-attention projection)
    if (
        self.encoder_output_dim != self.decoder.config.hidden_size
        and self.decoder.config.cross_attention_hidden_size is None
    ):
        encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

    # compute correct encoder attention mask
    # (the encoder downsamples the waveform, so the mask must be recomputed
    # for the reduced feature-vector length)
    if attention_mask is not None:
        encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
            encoder_hidden_states.shape[1], attention_mask
        )
    else:
        encoder_attention_mask = None

    # Derive decoder inputs from labels (teacher forcing) when the caller gave
    # labels but no explicit decoder inputs.
    if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
        decoder_input_ids = shift_tokens_right(
            labels, self.config.pad_token_id, self.config.decoder_start_token_id
        )

    # Decode
    decoder_outputs = self.decoder(
        input_ids=decoder_input_ids,
        attention_mask=decoder_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        inputs_embeds=decoder_inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        use_cache=use_cache,
        past_key_values=past_key_values,
        return_dict=return_dict,
        **kwargs_decoder,
    )

    # Compute loss independent from decoder (as some shift the logits inside them)
    loss = None
    if labels is not None:
        logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.reshape(-1))

    if not return_dict:
        # Tuple return: (loss?, *decoder_outputs, *encoder_outputs)
        if loss is not None:
            return (loss,) + decoder_outputs + encoder_outputs
        else:
            return decoder_outputs + encoder_outputs

    # NOTE: encoder_last_hidden_state is the (possibly projected) hidden state
    # actually fed to the decoder, not the raw encoder output.
    return Seq2SeqLMOutput(
        loss=loss,
        logits=decoder_outputs.logits,
        past_key_values=decoder_outputs.past_key_values,
        decoder_hidden_states=decoder_outputs.hidden_states,
        decoder_attentions=decoder_outputs.attentions,
        cross_attentions=decoder_outputs.cross_attentions,
        encoder_last_hidden_state=encoder_hidden_states,
        encoder_hidden_states=encoder_outputs.hidden_states,
        encoder_attentions=encoder_outputs.attentions,
    )
577
+
578
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
    """Build teacher-forcing decoder inputs by right-shifting the labels."""
    cfg = self.config
    return shift_tokens_right(labels, cfg.pad_token_id, cfg.decoder_start_token_id)
580
+
581
def prepare_inputs_for_generation(
    self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
    """Assemble the keyword arguments for one generation step.

    The wrapped decoder prepares its own inputs first; its results are then
    re-keyed with the ``decoder_`` prefix expected by ``forward``.
    """
    prepared = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
    return {
        "attention_mask": attention_mask,
        "decoder_attention_mask": prepared.get("attention_mask"),
        "decoder_input_ids": prepared["input_ids"],
        "encoder_outputs": encoder_outputs,
        "past_key_values": prepared["past_key_values"],
        "use_cache": use_cache,
    }
595
+
596
def resize_token_embeddings(self, *args, **kwargs):
    """Unsupported on the composite model.

    Raises:
        NotImplementedError: always; resize via ``model.decoder`` instead.
    """
    message = (
        "Resizing the embedding layers via the SpeechEncoderDecoderModel directly is not supported. Please use the"
        " respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))"
    )
    raise NotImplementedError(message)
601
+
602
+ def _reorder_cache(self, past_key_values, beam_idx):
603
+ # apply decoder cache reordering here
604
+ return self.decoder._reorder_cache(past_key_values, beam_idx)
llava_next/lib/python3.10/site-packages/transformers/models/videomae/__init__.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
# Lazy import structure for the VideoMAE package: each submodule is imported
# only when one of its public names is first accessed.
_import_structure = {
    "configuration_videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"],
}

# Modeling classes require torch; register them only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_videomae"] = [
        "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VideoMAEForPreTraining",
        "VideoMAEModel",
        "VideoMAEPreTrainedModel",
        "VideoMAEForVideoClassification",
    ]

# Image/feature processors require the vision extras (e.g. PIL).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_videomae"] = ["VideoMAEFeatureExtractor"]
    _import_structure["image_processing_videomae"] = ["VideoMAEImageProcessor"]

if TYPE_CHECKING:
    # Static type checkers see eager imports that mirror the lazy structure above.
    from .configuration_videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_videomae import (
            VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            VideoMAEForPreTraining,
            VideoMAEForVideoClassification,
            VideoMAEModel,
            VideoMAEPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_videomae import VideoMAEFeatureExtractor
        from .image_processing_videomae import VideoMAEImageProcessor

else:
    import sys

    # At runtime, replace this module object with a lazy proxy that defers
    # submodule imports until attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.31 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc ADDED
Binary file (5.79 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/convert_videomae_to_pytorch.cpython-310.pyc ADDED
Binary file (9.02 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc ADDED
Binary file (1.02 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc ADDED
Binary file (13.3 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc ADDED
Binary file (35.4 kB). View file
 
llava_next/lib/python3.10/site-packages/transformers/models/videomae/configuration_videomae.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ VideoMAE model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
# Module-level logger, named after this module per the library convention.
logger = logging.get_logger(__name__)

# Maps Hub checkpoint names to the URL of their configuration file.
VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MCG-NJU/videomae-base": "https://huggingface.co/MCG-NJU/videomae-base/resolve/main/config.json",
}
26
+
27
+
28
class VideoMAEConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VideoMAEModel`]. It is used to instantiate a
    VideoMAE model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the VideoMAE
    [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        num_frames (`int`, *optional*, defaults to 16):
            The number of frames in each video.
        tubelet_size (`int`, *optional*, defaults to 2):
            The number of tubelets.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        use_mean_pooling (`bool`, *optional*, defaults to `True`):
            Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token.
        decoder_num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the decoder.
        decoder_hidden_size (`int`, *optional*, defaults to 384):
            Dimensionality of the decoder.
        decoder_num_hidden_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the decoder.
        decoder_intermediate_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder.
        norm_pix_loss (`bool`, *optional*, defaults to `True`):
            Whether to normalize the target patch pixels.

    Example:

    ```python
    >>> from transformers import VideoMAEConfig, VideoMAEModel

    >>> # Initializing a VideoMAE videomae-base style configuration
    >>> configuration = VideoMAEConfig()

    >>> # Randomly initializing a model from the configuration
    >>> model = VideoMAEModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "videomae"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=16,
        tubelet_size=2,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        qkv_bias=True,
        use_mean_pooling=True,
        decoder_num_attention_heads=6,
        decoder_hidden_size=384,
        decoder_num_hidden_layers=4,
        decoder_intermediate_size=1536,
        norm_pix_loss=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Store every architecture hyper-parameter on the instance. setattr is
        # used (rather than __dict__.update) so any attribute handling in the
        # PretrainedConfig base class still applies; insertion order matches
        # the parameter order above.
        hyperparameters = dict(
            # video input geometry
            image_size=image_size,
            patch_size=patch_size,
            num_channels=num_channels,
            num_frames=num_frames,
            tubelet_size=tubelet_size,
            # encoder architecture
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            qkv_bias=qkv_bias,
            use_mean_pooling=use_mean_pooling,
            # decoder used for masked-autoencoder pre-training
            decoder_num_attention_heads=decoder_num_attention_heads,
            decoder_hidden_size=decoder_hidden_size,
            decoder_num_hidden_layers=decoder_num_hidden_layers,
            decoder_intermediate_size=decoder_intermediate_size,
            norm_pix_loss=norm_pix_loss,
        )
        for name, value in hyperparameters.items():
            setattr(self, name, value)