diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..57e39b6e1fa66085b4571324ee61e35468204b7e
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__init__.py
@@ -0,0 +1,76 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
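+# Map each submodule to the public names it exposes; `_LazyModule` uses this
+# mapping to import a submodule only when one of its attributes is first accessed.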
+_import_structure = {
+ "configuration_clap": [
+ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ClapAudioConfig",
+ "ClapConfig",
+ "ClapTextConfig",
+ ],
+ "processing_clap": ["ClapProcessor"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_clap"] = [
+ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ClapModel",
+ "ClapPreTrainedModel",
+ "ClapTextModel",
+ "ClapTextModelWithProjection",
+ "ClapAudioModel",
+ "ClapAudioModelWithProjection",
+ ]
+ _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
+
+if TYPE_CHECKING:
+ from .configuration_clap import (
+ CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ClapAudioConfig,
+ ClapConfig,
+ ClapTextConfig,
+ )
+ from .processing_clap import ClapProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_clap import ClapFeatureExtractor
+ from .modeling_clap import (
+ CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ClapAudioModel,
+ ClapAudioModelWithProjection,
+ ClapModel,
+ ClapPreTrainedModel,
+ ClapTextModel,
+ ClapTextModelWithProjection,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0acac090819d80b3127179c20f73224104a9f400
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef30a19c18739e30a597a1b42e68645904372387
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/configuration_clap.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..95c04f890927e9924c6864354bd1433089453c9b
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/convert_clap_original_pytorch_to_hf.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..299cdaa7d0012b3fb5f3ab4c9fe1797f7edaee6a
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/feature_extraction_clap.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54767fd82c754327e0757201ef51adb2c3e38b4f
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/modeling_clap.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4fa1a1d632aaa9fdb7773711bd2787f954c7206
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/clap/__pycache__/processing_clap.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py b/llava_next/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce18fedd19b109ee9af3b6c7de964e6a217abeef
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py
@@ -0,0 +1,363 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for CLAP."""
+
+
+import copy
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+
+from ...audio_utils import mel_filter_bank, spectrogram, window_function
+from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ...feature_extraction_utils import BatchFeature
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ClapFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs a CLAP feature extractor.
+
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
+
+    This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time
+    Fourier Transform* (STFT), which should match PyTorch's `torch.stft`.
+
+ Args:
+ feature_size (`int`, *optional*, defaults to 64):
+ The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters
+ (`n_mels`).
+ sampling_rate (`int`, *optional*, defaults to 48000):
+            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz). This only serves
+ to warn users if the audio fed to the feature extractor does not have the same sampling rate.
+        hop_length (`int`, *optional*, defaults to 480):
+            Length of the overlapping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split
+            into smaller `frames` with a step of `hop_length` between each frame.
+ max_length_s (`int`, *optional*, defaults to 10):
+ The maximum input length of the model in seconds. This is used to pad the audio.
+ fft_window_size (`int`, *optional*, defaults to 1024):
+ Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency
+            resolution of the spectrogram: e.g., 1024 means the Fourier transform is computed on windows of 1024 samples.
+ padding_value (`float`, *optional*, defaults to 0.0):
+ Padding value used to pad the audio. Should correspond to silences.
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
+            Whether or not the model should return the attention masks corresponding to the input.
+ frequency_min (`float`, *optional*, defaults to 0):
+ The lowest frequency of interest. The STFT will not be computed for values below this.
+ frequency_max (`float`, *optional*, defaults to 14000):
+ The highest frequency of interest. The STFT will not be computed for values above this.
+ top_db (`float`, *optional*):
+ The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the
+            `audio_utils.power_to_db` function.
+ truncation (`str`, *optional*, defaults to `"fusion"`):
+ Truncation pattern for long audio inputs. Two patterns are available:
+ - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a
+ downsampled version of the entire mel spectrogram.
+            If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a copy
+ of the original mel obtained from the padded audio.
+ - `rand_trunc` will select a random crop of the mel spectrogram.
+ padding (`str`, *optional*, defaults to `"repeatpad"`):
+ Padding pattern for shorter audio inputs. Three patterns were originally implemented:
+ - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
+ - `repeat`: the audio is repeated and then cut to fit the `max_length`
+ - `pad`: the audio is padded.
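+
+    Example (a minimal usage sketch; the one-second silent clip below is only an illustrative placeholder):
+
+    ```python
+    >>> import numpy as np
+
+    >>> from transformers import ClapFeatureExtractor
+
+    >>> feature_extractor = ClapFeatureExtractor()
+    >>> audio = np.zeros(48_000)  # one second of silence at the default 48 kHz sampling rate
+    >>> inputs = feature_extractor(audio, sampling_rate=48_000, return_tensors="np")
+    >>> sorted(inputs.keys())
+    ['input_features', 'is_longer']
+    ```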
+ """
+
+ model_input_names = ["input_features", "is_longer"]
+
+ def __init__(
+ self,
+ feature_size=64,
+ sampling_rate=48_000,
+ hop_length=480,
+ max_length_s=10,
+ fft_window_size=1024,
+ padding_value=0.0,
+ return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask
+ frequency_min: float = 0,
+ frequency_max: float = 14_000,
+        top_db: Optional[float] = None,
+ truncation: str = "fusion",
+ padding: str = "repeatpad",
+ **kwargs,
+ ):
+ super().__init__(
+ feature_size=feature_size,
+ sampling_rate=sampling_rate,
+ padding_value=padding_value,
+ return_attention_mask=return_attention_mask,
+ **kwargs,
+ )
+ self.top_db = top_db
+ self.truncation = truncation
+ self.padding = padding
+ self.fft_window_size = fft_window_size
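+        # a real-valued FFT over a window of `fft_window_size` samples yields
+        # `fft_window_size // 2 + 1` frequency bins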
+ self.nb_frequency_bins = (fft_window_size >> 1) + 1
+ self.hop_length = hop_length
+ self.max_length_s = max_length_s
+ self.nb_max_samples = max_length_s * sampling_rate
+ self.sampling_rate = sampling_rate
+ self.frequency_min = frequency_min
+ self.frequency_max = frequency_max
+ self.mel_filters = mel_filter_bank(
+ num_frequency_bins=self.nb_frequency_bins,
+ num_mel_filters=feature_size,
+ min_frequency=frequency_min,
+ max_frequency=frequency_max,
+ sampling_rate=sampling_rate,
+ norm=None,
+ mel_scale="htk",
+ )
+ self.mel_filters_slaney = mel_filter_bank(
+ num_frequency_bins=self.nb_frequency_bins,
+ num_mel_filters=feature_size,
+ min_frequency=frequency_min,
+ max_frequency=frequency_max,
+ sampling_rate=sampling_rate,
+ norm="slaney",
+ mel_scale="slaney",
+ )
+
+ def to_dict(self) -> Dict[str, Any]:
+ """
+ Serializes this instance to a Python dictionary.
+
+ Returns:
+            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the
+ mel filter banks, which do not need to be saved or printed as they are too long.
+ """
+ output = copy.deepcopy(self.__dict__)
+ output["feature_extractor_type"] = self.__class__.__name__
+ if "mel_filters" in output:
+ del output["mel_filters"]
+ if "mel_filters_slaney" in output:
+ del output["mel_filters_slaney"]
+ return output
+
+    def _np_extract_fbank_features(self, waveform: np.ndarray, mel_filters: Optional[np.ndarray] = None) -> np.ndarray:
+ """
+ Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter
+ banks are used depending on the truncation pattern:
+ - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from
+ calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation`
+ is set to `"fusion"`.
+        - `self.mel_filters_slaney`: they correspond to the default parameters of `librosa`, which uses
+ `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original
+ implementation when the truncation mode is not `"fusion"`.
+ """
+ log_mel_spectrogram = spectrogram(
+ waveform,
+ window_function(self.fft_window_size, "hann"),
+ frame_length=self.fft_window_size,
+ hop_length=self.hop_length,
+ power=2.0,
+ mel_filters=mel_filters,
+ log_mel="dB",
+ )
+ return log_mel_spectrogram.T
+
+ def _random_mel_fusion(self, mel, total_frames, chunk_frames):
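+        # Split the valid crop start indices into three equal ranges (front, middle
+        # and back of the spectrogram), sample one crop from each range, and stack
+        # the crops with a bilinearly downsampled copy of the full spectrogram.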
+ ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
+ if len(ranges[1]) == 0:
+ # if the audio is too short, we just use the first chunk
+ ranges[1] = [0]
+ if len(ranges[2]) == 0:
+ # if the audio is too short, we just use the first chunk
+ ranges[2] = [0]
+ # randomly choose index for each part
+ idx_front = np.random.choice(ranges[0])
+ idx_middle = np.random.choice(ranges[1])
+ idx_back = np.random.choice(ranges[2])
+
+ mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
+ mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
+ mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
+
+ mel = torch.tensor(mel[None, None, :])
+ mel_shrink = torch.nn.functional.interpolate(
+ mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
+ )
+ mel_shrink = mel_shrink[0][0].numpy()
+ mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
+ return mel_fusion
+
+    def _get_input_mel(self, waveform: np.ndarray, max_length, truncation, padding) -> Tuple[np.ndarray, bool]:
+ """
+        Extracts the mel spectrogram and prepares it for the model based on the `truncation` and `padding` arguments.
+        Four different paths are possible:
+ - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram
+            will be computed on the entire audio. 3 random crops and a downsampled version of the full mel spectrogram
+ are then stacked together. They will later be used for `feature_fusion`.
+ - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is
+ padded based on `padding`.
+ - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded
+ based on `padding`, and is repeated `4` times.
+ - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel
+ spectrogram will be computed on a random crop of the waveform.
+
+ """
+ if waveform.shape[0] > max_length:
+ if truncation == "rand_trunc":
+ longer = True
+ # random crop to max_length (for compatibility) -> this should be handled by self.pad
+ overflow = len(waveform) - max_length
+ idx = np.random.randint(0, overflow + 1)
+ waveform = waveform[idx : idx + max_length]
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
+ elif truncation == "fusion":
+ mel = self._np_extract_fbank_features(waveform, self.mel_filters)
+                chunk_frames = max_length // self.hop_length + 1  # the +1 is related to how the spectrogram is computed
+ total_frames = mel.shape[0]
+ if chunk_frames == total_frames:
+ # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
+ # In this case, we just use the whole audio.
+ input_mel = np.stack([mel, mel, mel, mel], axis=0)
+ longer = False
+ else:
+ input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
+ longer = True
+ else:
+ raise NotImplementedError(f"data_truncating {truncation} not implemented")
+
+ else:
+ longer = False
+            # `repeat` is a new possible value for `padding`: the audio is tiled before the usual max_length padding is applied
+ if waveform.shape[0] < max_length:
+ if padding == "repeat":
+ n_repeat = int(max_length / len(waveform))
+ waveform = np.tile(waveform, n_repeat + 1)[:max_length]
+ if padding == "repeatpad":
+ n_repeat = int(max_length / len(waveform))
+ waveform = np.tile(waveform, n_repeat)
+ waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
+
+ if truncation == "fusion":
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
+ input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
+ else:
+ input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
+
+ return input_mel, longer
+
+ def __call__(
+ self,
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+        truncation: Optional[str] = None,
+ padding: Optional[str] = None,
+ max_length: Optional[int] = None,
+ sampling_rate: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Main method to featurize and prepare for the model one or several sequence(s).
+
+ Args:
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
+ stereo, i.e. single float per timestep.
+ truncation (`str`, *optional*):
+ Truncation pattern for long audio inputs. Two patterns are available:
+ - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and
+ a downsampled version of the entire mel spectrogram.
+                  If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a
+ copy of the original mel obtained from the padded audio.
+ - `rand_trunc` will select a random crop of the mel spectrogram.
+ padding (`str`, *optional*):
+ Padding pattern for shorter audio inputs. Three patterns were originally implemented:
+ - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`.
+ - `repeat`: the audio is repeated and then cut to fit the `max_length`
+ - `pad`: the audio is padded.
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+                If set, will return tensors instead of lists of Python numbers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+                - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ sampling_rate (`int`, *optional*):
+                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+                `sampling_rate` at the forward call to prevent silent errors.
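+
+        Example (a minimal sketch; the random waveform below only stands in for real audio):
+
+        ```python
+        >>> import numpy as np
+
+        >>> from transformers import ClapFeatureExtractor
+
+        >>> feature_extractor = ClapFeatureExtractor()
+        >>> audio = np.random.rand(15 * 48_000)  # a 15 s clip, longer than the 10 s maximum
+        >>> inputs = feature_extractor(audio, sampling_rate=48_000, truncation="rand_trunc", return_tensors="np")
+        >>> inputs["is_longer"]
+        array([[ True]])
+        ```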
+ """
+ truncation = truncation if truncation is not None else self.truncation
+ padding = padding if padding else self.padding
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
+ f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
+ f" was sampled with {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+ if is_batched_numpy and len(raw_speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
+ raw_speech = np.asarray(raw_speech, dtype=np.float64)
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+ raw_speech = raw_speech.astype(np.float64)
+
+ # always return batch
+ if not is_batched:
+ raw_speech = [np.asarray(raw_speech)]
+
+ # convert to mel spectrogram, truncate and pad if needed.
+ padded_inputs = [
+ self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
+ for waveform in raw_speech
+ ]
+
+ input_mel = []
+ is_longer = []
+ for mel, longer in padded_inputs:
+ input_mel.append(mel)
+ is_longer.append(longer)
+
+ if truncation == "fusion" and sum(is_longer) == 0:
+ # if no audio is longer than 10s, then randomly select one audio to be longer
+ rand_idx = np.random.randint(0, len(input_mel))
+ is_longer[rand_idx] = True
+
+        if isinstance(input_mel[0], list):
+ input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
+
+ # is_longer is a list of bool
+ is_longer = [[longer] for longer in is_longer]
+
+ input_features = {"input_features": input_mel, "is_longer": is_longer}
+ input_features = BatchFeature(input_features)
+
+ if return_tensors is not None:
+ input_features = input_features.convert_to_tensors(return_tensors)
+
+ return input_features
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py b/llava_next/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py
new file mode 100644
index 0000000000000000000000000000000000000000..87799899945fa669d3980e8cc6c15192cf7a2ba5
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py
@@ -0,0 +1,117 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Audio/Text processor class for CLAP
+"""
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding
+
+
+class ClapProcessor(ProcessorMixin):
+ r"""
+    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
+
+ [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
+ [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.
+
+ Args:
+ feature_extractor ([`ClapFeatureExtractor`]):
+ The audio processor is a required input.
+ tokenizer ([`RobertaTokenizerFast`]):
+ The tokenizer is a required input.
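+
+    Example (a minimal sketch; `"laion/clap-htsat-unfused"` is one publicly available CLAP checkpoint and the
+    random waveform only stands in for real audio):
+
+    ```python
+    >>> import numpy as np
+
+    >>> from transformers import ClapProcessor
+
+    >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
+    >>> audio = np.random.rand(48_000)
+    >>> inputs = processor(text=["the sound of a cat"], audios=audio, sampling_rate=48_000, return_tensors="pt")
+    >>> sorted(inputs.keys())
+    ['attention_mask', 'input_features', 'input_ids']
+    ```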
+ """
+
+ feature_extractor_class = "ClapFeatureExtractor"
+ tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+
+ def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
+ """
+        Main method to prepare for the model one or several sequence(s) and audio(s). This method forwards the `text`
+        and `kwargs` arguments to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] if `text` is not `None` to
+        encode the text. To prepare the audio(s), this method forwards the `audios` and `kwargs` arguments to
+        ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the
+        docstring of the above two methods for more information.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
+                The audio or batch of audios to be prepared. Each audio can be a NumPy array or a PyTorch tensor. In
+                case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is the number of
+                channels and T the sample length of the audio.
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors of a particular framework. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return NumPy `np.ndarray` objects.
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+ Returns:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+ `None`).
+            - **input_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`.
+ """
+ sampling_rate = kwargs.pop("sampling_rate", None)
+
+ if text is None and audios is None:
+ raise ValueError("You have to specify either text or audios. Both cannot be none.")
+
+ if text is not None:
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
+
+ if audios is not None:
+ audio_features = self.feature_extractor(
+ audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
+ )
+
+ if text is not None and audios is not None:
+ encoding["input_features"] = audio_features.input_features
+ return encoding
+ elif text is not None:
+ return encoding
+ else:
+ return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ feature_extractor_input_names = self.feature_extractor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a2756eb9d1c269e08446f9328120738196349d0
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py
@@ -0,0 +1,166 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_distilbert": [
+ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "DistilBertConfig",
+ "DistilBertOnnxConfig",
+ ],
+ "tokenization_distilbert": ["DistilBertTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_distilbert"] = [
+ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DistilBertForMaskedLM",
+ "DistilBertForMultipleChoice",
+ "DistilBertForQuestionAnswering",
+ "DistilBertForSequenceClassification",
+ "DistilBertForTokenClassification",
+ "DistilBertModel",
+ "DistilBertPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_distilbert"] = [
+ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFDistilBertForMaskedLM",
+ "TFDistilBertForMultipleChoice",
+ "TFDistilBertForQuestionAnswering",
+ "TFDistilBertForSequenceClassification",
+ "TFDistilBertForTokenClassification",
+ "TFDistilBertMainLayer",
+ "TFDistilBertModel",
+ "TFDistilBertPreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_distilbert"] = [
+ "FlaxDistilBertForMaskedLM",
+ "FlaxDistilBertForMultipleChoice",
+ "FlaxDistilBertForQuestionAnswering",
+ "FlaxDistilBertForSequenceClassification",
+ "FlaxDistilBertForTokenClassification",
+ "FlaxDistilBertModel",
+ "FlaxDistilBertPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_distilbert import (
+ DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ DistilBertConfig,
+ DistilBertOnnxConfig,
+ )
+ from .tokenization_distilbert import DistilBertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_distilbert_fast import DistilBertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_distilbert import (
+ DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ DistilBertForMaskedLM,
+ DistilBertForMultipleChoice,
+ DistilBertForQuestionAnswering,
+ DistilBertForSequenceClassification,
+ DistilBertForTokenClassification,
+ DistilBertModel,
+ DistilBertPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_distilbert import (
+ TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFDistilBertForMaskedLM,
+ TFDistilBertForMultipleChoice,
+ TFDistilBertForQuestionAnswering,
+ TFDistilBertForSequenceClassification,
+ TFDistilBertForTokenClassification,
+ TFDistilBertMainLayer,
+ TFDistilBertModel,
+ TFDistilBertPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_distilbert import (
+ FlaxDistilBertForMaskedLM,
+ FlaxDistilBertForMultipleChoice,
+ FlaxDistilBertForQuestionAnswering,
+ FlaxDistilBertForSequenceClassification,
+ FlaxDistilBertForTokenClassification,
+ FlaxDistilBertModel,
+ FlaxDistilBertPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b607495cc2dcd314cd8af9d7356ca17d00c0b7dc
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d105eec1cd1bb570a30ca157644f220a875bc22
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d15ba49d5756bd29afb3d8619d50d024317e1bd4
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98c5251a4d8f66f58ca9d86163400d355113a7d4
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..559af4bbf68e88633028b5c53e6a18726e664108
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86f0fb30dbf40ce9ea78091d96ab98bf885ce02a
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..581c5f21fdf04827575cf8be6f14e9cc67c38b23
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..97b5b7c869064b6fa9cae4879012aec3b030360f
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py
@@ -0,0 +1,155 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DistilBERT model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
+ "distilbert-base-uncased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
+ ),
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
+ "distilbert-base-cased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
+ ),
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
+ "distilbert-base-multilingual-cased": (
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
+ ),
+ "distilbert-base-uncased-finetuned-sst-2-english": (
+ "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
+ ),
+}
+
+
+class DistilBertConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It
+ is used to instantiate a DistilBERT model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DistilBERT
+ [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the DistilBERT model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`DistilBertModel`] or [`TFDistilBertModel`].
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+        sinusoidal_pos_embds (`bool`, *optional*, defaults to `False`):
+ Whether to use sinusoidal positional embeddings.
+ n_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer encoder.
+ n_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ dim (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ hidden_dim (`int`, *optional*, defaults to 3072):
+ The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ qa_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probabilities used in the question answering model [`DistilBertForQuestionAnswering`].
+ seq_classif_dropout (`float`, *optional*, defaults to 0.2):
+ The dropout probabilities used in the sequence classification and the multiple choice model
+ [`DistilBertForSequenceClassification`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import DistilBertConfig, DistilBertModel
+
+ >>> # Initializing a DistilBERT configuration
+ >>> configuration = DistilBertConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = DistilBertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "distilbert"
+ attribute_map = {
+ "hidden_size": "dim",
+ "num_attention_heads": "n_heads",
+ "num_hidden_layers": "n_layers",
+ }
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ max_position_embeddings=512,
+ sinusoidal_pos_embds=False,
+ n_layers=6,
+ n_heads=12,
+ dim=768,
+ hidden_dim=4 * 768,
+ dropout=0.1,
+ attention_dropout=0.1,
+ activation="gelu",
+ initializer_range=0.02,
+ qa_dropout=0.1,
+ seq_classif_dropout=0.2,
+ pad_token_id=0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.sinusoidal_pos_embds = sinusoidal_pos_embds
+ self.n_layers = n_layers
+ self.n_heads = n_heads
+ self.dim = dim
+ self.hidden_dim = hidden_dim
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation = activation
+ self.initializer_range = initializer_range
+ self.qa_dropout = qa_dropout
+ self.seq_classif_dropout = seq_classif_dropout
+ super().__init__(**kwargs, pad_token_id=pad_token_id)
+
+
+class DistilBertOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
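+        # The exported ONNX graph keeps the batch and sequence dimensions (plus the
+        # choice dimension for multiple-choice tasks) symbolic, so it accepts
+        # variable-sized inputs at inference time.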
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ]
+ )
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6d7a3bebc34b9673d009be1ad163e8ec4092258
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py
@@ -0,0 +1,1392 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
+ part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
+"""
+
+
+import math
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import get_activation
+from ...configuration_utils import PretrainedConfig
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import (
+ BaseModelOutput,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_distilbert import DistilBertConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "distilbert-base-uncased",
+ "distilbert-base-uncased-distilled-squad",
+ "distilbert-base-cased",
+ "distilbert-base-cased-distilled-squad",
+ "distilbert-base-german-cased",
+ "distilbert-base-multilingual-cased",
+ "distilbert-base-uncased-finetuned-sst-2-english",
+ # See all DistilBERT models at https://huggingface.co/models?filter=distilbert
+]
+
+
+# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
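+    # For a padded batch, compute the flat indices of the non-padding tokens, the
+    # cumulative sequence lengths (`cu_seqlens`, the layout expected by
+    # `flash_attn_varlen_func`), and the length of the longest sequence in the batch.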
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
+
+def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
+ if torch.distributed.get_rank() == 0:
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
+ else:
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
+
+
+def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
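+    # Standard "Attention Is All You Need" sinusoidal table:
+    # out[pos, 2i] = sin(pos / 10000^(2i / dim)), out[pos, 2i + 1] = cos(pos / 10000^(2i / dim))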
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
+ out.requires_grad = False
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+
+
+class Embeddings(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
+ if config.sinusoidal_pos_embds:
+ create_sinusoidal_embeddings(
+ n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight
+ )
+
+ self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
+ self.dropout = nn.Dropout(config.dropout)
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
+ """
+ Parameters:
+ input_ids (torch.Tensor):
+ torch.tensor(bs, max_seq_length) The token ids to embed.
+ input_embeds (*optional*, torch.Tensor):
+ The pre-computed word embeddings. Can only be passed if the input ids are `None`.
+
+
+        Returns:
+            torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
+            embeddings)
+ """
+ if input_ids is not None:
+ input_embeds = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
+
+ seq_length = input_embeds.size(1)
+
+        # Set the position ids from the buffer registered in the constructor; this helps
+        # when tracing the model without passing position ids and solves
+        # issues similar to issue #5664
+ if hasattr(self, "position_ids"):
+ position_ids = self.position_ids[:, :seq_length]
+ else:
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
+
+ position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
+
+ embeddings = input_embeds + position_embeddings # (bs, max_seq_length, dim)
+ embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
+ embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
+ return embeddings
+
+
+class MultiHeadSelfAttention(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.config = config
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.dropout = nn.Dropout(p=config.attention_dropout)
+ self.is_causal = False
+
+        # The number of attention heads must evenly divide the hidden dimension
+        if self.dim % self.n_heads != 0:
+            raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")
+
+ self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+
+ self.pruned_heads: Set[int] = set()
+ self.attention_head_size = self.dim // self.n_heads
+
+ def prune_heads(self, heads: List[int]):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.attention_head_size, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q_lin = prune_linear_layer(self.q_lin, index)
+ self.k_lin = prune_linear_layer(self.k_lin, index)
+ self.v_lin = prune_linear_layer(self.v_lin, index)
+ self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.dim = self.attention_head_size * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ mask: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ query: torch.tensor(bs, seq_length, dim)
+ key: torch.tensor(bs, seq_length, dim)
+ value: torch.tensor(bs, seq_length, dim)
+ mask: torch.tensor(bs, seq_length)
+
+        Returns:
+            context: torch.tensor(bs, seq_length, dim) The contextualized layer.
+            weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights; returned only if
+                `output_attentions=True`.
+ """
+ bs, q_length, dim = query.size()
+ k_length = key.size(1)
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ mask_reshp = (bs, 1, 1, k_length)
+
+ def shape(x: torch.Tensor) -> torch.Tensor:
+ """separate heads"""
+ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
+
+ def unshape(x: torch.Tensor) -> torch.Tensor:
+ """group heads"""
+ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
+
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
+ scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
+ mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
+ scores = scores.masked_fill(
+ mask, torch.tensor(torch.finfo(scores.dtype).min)
+ ) # (bs, n_heads, q_length, k_length)
+
+ weights = nn.functional.softmax(scores, dim=-1) # (bs, n_heads, q_length, k_length)
+ weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ weights = weights * head_mask
+
+ context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
+ context = unshape(context) # (bs, q_length, dim)
+ context = self.out_lin(context) # (bs, q_length, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
+
+
+class DistilBertFlashAttention2(MultiHeadSelfAttention):
+ """
+ DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
+ stays untouched. The only required change would be on the forward pass where it needs to correctly call the public
+ API of flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ mask: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ query: torch.tensor(bs, seq_length, dim)
+ key: torch.tensor(bs, seq_length, dim)
+ value: torch.tensor(bs, seq_length, dim)
+ mask: torch.tensor(bs, seq_length)
+
+        Returns:
+            context: torch.tensor(bs, seq_length, dim) The contextualized layer.
+            weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights; returned only if
+                `output_attentions=True`.
+ """
+ batch_size, q_length, dim = query.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ def reshape(x: torch.Tensor) -> torch.Tensor:
+ """separate heads"""
+ return x.view(batch_size, -1, self.n_heads, dim_per_head)
+
+ # Flash attention requires the input to have the shape
+        # batch_size x seq_length x n_heads x head_dim
+ query_states = reshape(self.q_lin(query))
+ key_states = reshape(self.k_lin(key))
+ value_states = reshape(self.v_lin(value))
+
+ attn_dropout = self.config.attention_dropout if self.training else 0.0
+
+        # In PEFT, we usually cast the layer norms to float32 for training stability reasons;
+        # therefore the input hidden states get silently cast to float32. Hence, we need to
+        # cast them back to the correct dtype just to be sure everything works as expected.
+        # This might slow down training & inference, so it is recommended not to cast the LayerNorms
+        # to fp32. (LlamaRMSNorm handles it correctly)
+
+ if query_states.dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_lin.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_weights = self._flash_attention_forward(
+ query_states, key_states, value_states, mask, q_length, dropout=attn_dropout
+ )
+
+ attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
+ attn_output = self.out_lin(attn_weights_reshaped)
+
+ if output_attentions:
+ return (attn_output, attn_weights)
+ else:
+ return (attn_output,)
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with causal=True->causal=False
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+ first unpads the input, then computes the attention scores, and finally pads the final attention scores.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`, *optional*):
+ Attention dropout probability
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # A mask is provided, so the sequence may contain padding tokens: unpad, run varlen attention, then re-pad
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->n_heads
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
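+ # Illustrative example: for attention_mask = [[1, 1, 0], [1, 1, 1]], the flattened
+ # non-padding indices are [0, 1, 3, 4, 5], cu_seqlens_k = [0, 2, 5] and
+ # max_seqlen_in_batch_k = 3 - exactly the arguments `flash_attn_varlen_func` expects
+ # for variable-length (unpadded) attention.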
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.n_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, which is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class FFN(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.dropout = nn.Dropout(p=config.dropout)
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
+ self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
+ self.activation = get_activation(config.activation)
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
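+
+ # `apply_chunking_to_forward` saves memory by processing the sequence in chunks. As a
+ # rough sketch (assuming `self.chunk_size_feed_forward > 0`), the call above behaves like:
+ #
+ # chunks = input.split(self.chunk_size_feed_forward, dim=self.seq_len_dim)
+ # return torch.cat([self.ff_chunk(chunk) for chunk in chunks], dim=self.seq_len_dim)
+ #
+ # With `chunk_size_feed_forward == 0` (the default), `ff_chunk` is applied to the
+ # whole input in one go.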
+
+ def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
+ x = self.lin1(input)
+ x = self.activation(x)
+ x = self.lin2(x)
+ x = self.dropout(x)
+ return x
+
+
+DISTILBERT_ATTENTION_CLASSES = {
+ "eager": MultiHeadSelfAttention,
+ "flash_attention_2": DistilBertFlashAttention2,
+}
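+
+# The implementation is selected through `config._attn_implementation`. As an
+# illustrative example (assumes the `flash-attn` package and a supported GPU):
+#
+# model = DistilBertModel.from_pretrained(
+# "distilbert-base-uncased",
+# attn_implementation="flash_attention_2",
+# torch_dtype=torch.float16,
+# )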
+
+
+class TransformerBlock(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+
+ # The hidden dimension must be a multiple of the number of attention heads
+ if config.dim % config.n_heads != 0:
+ raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")
+
+ self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
+ self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
+
+ self.ffn = FFN(config)
+ self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ attn_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ x: torch.tensor(bs, seq_length, dim)
+ attn_mask: torch.tensor(bs, seq_length)
+
+ Returns:
+ sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights.
+ ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
+ """
+ # Self-Attention
+ sa_output = self.attention(
+ query=x,
+ key=x,
+ value=x,
+ mask=attn_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ if output_attentions:
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
+ else: # When `output_attentions=False`, the attention module still returns a tuple
+ if not isinstance(sa_output, tuple):
+ raise TypeError(f"sa_output must be a tuple but it is of type {type(sa_output)}")
+
+ sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
+ ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
+
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+
+class Transformer(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.n_layers = config.n_layers
+ self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ attn_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: # docstyle-ignore
+ """
+ Parameters:
+ x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
+ attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
+
+ Returns:
+ hidden_state: torch.tensor(bs, seq_length, dim)
+ Sequence of hidden states in the last (top) layer.
+ all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
+ Tuple of length n_layers with the hidden states from each layer.
+ Optional: only if output_hidden_states=True
+ all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
+ Tuple of length n_layers with the attention weights from each layer
+ Optional: only if output_attentions=True
+ """
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_state = x
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
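+ # With gradient checkpointing enabled during training, each block's activations are
+ # recomputed in the backward pass instead of being stored, trading compute for memory.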
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_state,
+ attn_mask,
+ head_mask[i],
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_state,
+ attn_mask,
+ head_mask[i],
+ output_attentions,
+ )
+
+ hidden_state = layer_outputs[-1]
+
+ if output_attentions:
+ if len(layer_outputs) != 2:
+ raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")
+
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ if len(layer_outputs) != 1:
+ raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
+class DistilBertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ load_tf_weights = None
+ base_model_prefix = "distilbert"
+ supports_gradient_checkpointing = True
+ _supports_flash_attn_2 = True
+
+ def _init_weights(self, module: nn.Module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertModel(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.embeddings = Embeddings(config) # Embeddings
+ self.transformer = Transformer(config) # Encoder
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.embeddings.position_embeddings
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings
+
+ # no resizing needs to be done if the length stays the same
+ if num_position_embeds_diff == 0:
+ return
+
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
+ self.config.max_position_embeddings = new_num_position_embeddings
+
+ old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()
+
+ self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)
+
+ if self.config.sinusoidal_pos_embds:
+ create_sinusoidal_embeddings(
+ n_pos=self.config.max_position_embeddings,
+ dim=self.config.dim,
+ out=self.embeddings.position_embeddings.weight,
+ )
+ else:
+ with torch.no_grad():
+ if num_position_embeds_diff > 0:
+ self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
+ old_position_embeddings_weight
+ )
+ else:
+ self.embeddings.position_embeddings.weight = nn.Parameter(
+ old_position_embeddings_weight[:num_position_embeds_diff]
+ )
+ # move position_embeddings to correct device
+ self.embeddings.position_embeddings.to(self.device)
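+
+ # Illustrative usage (hypothetical sizes): growing a model with 512 learned positions
+ # to 1024 keeps the first 512 vectors and freshly initializes the new ones:
+ #
+ # model = DistilBertModel.from_pretrained("distilbert-base-uncased")
+ # model.resize_position_embeddings(1024)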
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, new_embeddings: nn.Embedding):
+ self.embeddings.word_embeddings = new_embeddings
+
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.transformer.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embeddings = self.embeddings(input_ids, inputs_embeds) # (bs, seq_length, dim)
+
+ if self._use_flash_attention_2:
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ else:
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
+
+ return self.transformer(
+ x=embeddings,
+ attn_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
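+
+ # Minimal usage sketch (mirrors the code sample injected by the decorators above):
+ #
+ # from transformers import AutoTokenizer, DistilBertModel
+ #
+ # tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+ # model = DistilBertModel.from_pretrained("distilbert-base-uncased")
+ # inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ # last_hidden = model(**inputs).last_hidden_state # (1, seq_length, dim)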
+
+
+@add_start_docstrings(
+ """DistilBert Model with a `masked language modeling` head on top.""",
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForMaskedLM(DistilBertPreTrainedModel):
+ _tied_weights_keys = ["vocab_projector.weight"]
+
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.activation = get_activation(config.activation)
+
+ self.distilbert = DistilBertModel(config)
+ self.vocab_transform = nn.Linear(config.dim, config.dim)
+ self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
+ self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.mlm_loss_fct = nn.CrossEntropyLoss()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ def get_output_embeddings(self) -> nn.Module:
+ return self.vocab_projector
+
+ def set_output_embeddings(self, new_embeddings: nn.Module):
+ self.vocab_projector = new_embeddings
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size - 1]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ dlbrt_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = dlbrt_output[0] # (bs, seq_length, dim)
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
+ prediction_logits = self.activation(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)
+
+ mlm_loss = None
+ if labels is not None:
+ mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_logits,) + dlbrt_output[1:]
+ return ((mlm_loss,) + output) if mlm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=mlm_loss,
+ logits=prediction_logits,
+ hidden_states=dlbrt_output.hidden_states,
+ attentions=dlbrt_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.distilbert = DistilBertModel(config)
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
+ self.classifier = nn.Linear(config.dim, config.num_labels)
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output) # (bs, dim)
+ logits = self.classifier(pooled_output) # (bs, num_labels)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
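+
+ # Note on the `problem_type` resolution above: a single label is treated as regression
+ # (MSE loss), several integer-typed labels as single-label classification (cross-entropy),
+ # and anything else as multi-label classification (BCE with logits).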
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.distilbert = DistilBertModel(config)
+ self.qa_outputs = nn.Linear(config.dim, config.num_labels)
+ if config.num_labels != 2:
+ raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")
+
+ self.dropout = nn.Dropout(config.qa_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
+
+ hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous() # (bs, max_query_len)
+ end_logits = end_logits.squeeze(-1).contiguous() # (bs, max_query_len)
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, the split can add a dimension; squeeze it away
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + distilbert_output[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
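+
+ # Illustrative post-processing sketch (with hypothetical `inputs`, `outputs` and
+ # `tokenizer` objects): a simple greedy way to turn the logits into an answer span:
+ #
+ # start_index = int(outputs.start_logits[0].argmax())
+ # end_index = int(outputs.end_logits[0].argmax())
+ # answer = tokenizer.decode(inputs["input_ids"][0, start_index : end_index + 1])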
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForTokenClassification(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.distilbert = DistilBertModel(config)
+ self.dropout = nn.Dropout(config.dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.distilbert = DistilBertModel(config)
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
+ self.classifier = nn.Linear(config.dim, 1)
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
+ will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
+ >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> choice0 = "It is eaten with a fork and a knife."
+ >>> choice1 = "It is eaten while held in the hand."
+ >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
+
+ >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
+ >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
+
+ >>> # the linear classifier still needs to be trained
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
+ pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
+ pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
+ logits = self.classifier(pooled_output) # (bs * num_choices, 1)
+
+ reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ba34eb9b20263726c9d9d311c8d28fc6f056d22
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py
@@ -0,0 +1,895 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from typing import Callable, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxMaskedLMOutput,
+ FlaxMultipleChoiceModelOutput,
+ FlaxQuestionAnsweringModelOutput,
+ FlaxSequenceClassifierOutput,
+ FlaxTokenClassifierOutput,
+)
+from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_distilbert import DistilBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+
+FLAX_DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def get_angles(pos, i, d_model):
+ angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
+ return pos * angle_rates
+
+
+def positional_encoding(position, d_model):
+ # create the sinusoidal pattern for the positional encoding
+ angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
+
+ # apply sin to even indices in the array; 2i
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
+
+ # apply cos to odd indices in the array; 2i+1
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
+
+ pos_encoding = angle_rads[np.newaxis, ...]
+
+ return jnp.array(pos_encoding)
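+
+# For example, positional_encoding(512, 768) returns an array of shape (1, 512, 768),
+# with sine components at even feature indices and cosine components at odd ones.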
+
+
+class FlaxEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.word_embeddings = nn.Embed(
+ self.config.vocab_size,
+ self.config.dim,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ if not self.config.sinusoidal_pos_embds:
+ self.position_embeddings = nn.Embed(
+ self.config.max_position_embeddings,
+ self.config.dim,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ else:
+ self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim)
+ self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+
+ def __call__(self, input_ids, deterministic: bool = True):
+ # Embed
+ batch_size, seq_length = input_ids.shape
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
+ if not self.config.sinusoidal_pos_embds:
+ position_ids = jnp.arange(seq_length).astype("i4")
+ position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length))
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
+ else:
+ position_embeds = self.pos_encoding[:, :seq_length, :]
+ # explicitly cast the positions here, since self.pos_encoding is not registered as a parameter
+ position_embeds = position_embeds.astype(inputs_embeds.dtype)
+
+ # Sum all embeddings
+ hidden_states = inputs_embeds + position_embeds
+
+ # Layer Norm
+ hidden_states = self.LayerNorm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxMultiHeadSelfAttention(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.n_heads = self.config.n_heads
+ self.dim = self.config.dim
+ self.dropout = nn.Dropout(rate=self.config.attention_dropout)
+
+ if not (self.dim % self.n_heads == 0):
+ raise ValueError(f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}")
+
+ self.q_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.k_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.v_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.out_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ def __call__(
+ self,
+ query,
+ key,
+ value,
+ mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ ):
+ bs, q_len, dim = query.shape
+ k_len = key.shape[1]
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ mask_reshp = (bs, 1, 1, k_len)
+
+ def shape(x):
+ """separate heads"""
+ return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3)
+
+ def unshape(x):
+ """group heads"""
+ return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head)
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_len, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_len, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_len, dim_per_head)
+
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_len, dim_per_head)
+ scores = jnp.matmul(q, k.transpose(0, 1, 3, 2)) # (bs, n_heads, q_len, k_len)
+ mask = jnp.reshape(mask, mask_reshp)
+
+ mask = mask.astype(scores.dtype)
+ scores = scores - 1e30 * (1.0 - mask)
+
+ weights = nn.softmax(scores, axis=-1) # (bs, n_heads, q_len, k_len)
+ weights = self.dropout(weights, deterministic=deterministic)
+
+ context = jnp.matmul(weights, v) # (bs, n_heads, q_len, dim_per_head)
+ context = unshape(context) # (bs, q_len, dim)
+ context = self.out_lin(context) # (bs, q_len, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
+
+
+class FlaxFFN(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+ self.chunk_size_feed_forward = self.config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.lin1 = nn.Dense(
+ self.config.hidden_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.lin2 = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ self.activation = ACT2FN[self.config.activation]
+
+ def __call__(self, hidden_states, deterministic: bool = True):
+ hidden_states = self.lin1(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.lin2(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxTransformerBlock(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ assert (
+ self.config.dim % self.config.n_heads == 0
+ ), f"Hidden size {self.config.dim} not divisible by number of heads {self.config.n_heads}"
+
+ self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype)
+ self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+
+ self.ffn = FlaxFFN(self.config, dtype=self.dtype)
+ self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attn_mask,
+ output_attentions: bool = False,
+ deterministic: bool = True,
+ ):
+ # Self-Attention
+ sa_output = self.attention(
+ query=hidden_states,
+ key=hidden_states,
+ value=hidden_states,
+ mask=attn_mask,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ if output_attentions:
+ sa_output, sa_weights = sa_output
+ else:
+ assert type(sa_output) == tuple
+ sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + hidden_states)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output, deterministic=deterministic)
+ ffn_output = self.output_layer_norm(ffn_output + sa_output)
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+
+class FlaxTransformer(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ return_dict: bool = False,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for layer_module in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ attn_mask=attention_mask,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = layer_outputs[-1]
+
+ if output_attentions:
+ assert len(layer_outputs) == 2
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ assert len(layer_outputs) == 1
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class FlaxTransformerEncoder(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layer = FlaxTransformer(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ return_dict: bool = False,
+ ):
+ return self.layer(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ return_dict=return_dict,
+ )
+
+
+class FlaxDistilBertLMDecoder(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
+
+ def __call__(self, inputs, kernel):
+ inputs = jnp.asarray(inputs, self.dtype)
+ kernel = jnp.asarray(kernel, self.dtype)
+ y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
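+ # dot_general contracts the last axis of `inputs` with the first axis of `kernel`:
+ # for inputs of shape (..., dim) and a kernel of shape (dim, vocab_size) this is
+ # simply `inputs @ kernel`, letting a (transposed) tied word-embedding matrix serve
+ # as the projection weights.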
+ bias = jnp.asarray(self.bias, self.dtype)
+ y = y + bias
+ return y
+
+
+class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ base_model_prefix = "distilbert"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: DistilBertConfig,
+ input_shape: Tuple = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ attention_mask = jnp.ones_like(input_ids)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ head_mask=None,
+ params: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ return self.module.apply(
+ {"params": params or self.params},
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ not train,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ rngs=rngs,
+ )
+
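+ # The positional arguments passed to `module.apply` above line up with
+ # `FlaxDistilBertModule.__call__`: `not train` feeds the `deterministic` flag, so
+ # dropout only fires when `train=True` and a `dropout_rng` is supplied. A usage
+ # sketch (hypothetical `model`, `input_ids`, `attention_mask`):
+ #
+ # >>> import jax
+ # >>> rng = jax.random.PRNGKey(0)
+ # >>> outputs = model(input_ids, attention_mask, dropout_rng=rng, train=True)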
+
+class FlaxDistilBertModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype)
+ self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ input_embeds = self.embeddings(input_ids, deterministic=deterministic)
+ return self.transformer(
+ hidden_states=input_embeds,
+ attention_mask=attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+@add_start_docstrings(
+ "The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.",
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertModule
+
+
+append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC)
+
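+ # A minimal usage sketch for the bare model, assuming the `distilbert-base-uncased`
+ # checkpoint used throughout the docstrings:
+ #
+ # >>> from transformers import AutoTokenizer, FlaxDistilBertModel
+ # >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+ # >>> model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
+ # >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
+ # >>> outputs = model(**inputs)
+ # >>> outputs.last_hidden_state.shape # (batch_size, seq_length, dim)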
+
+class FlaxDistilBertForMaskedLMModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype)
+ self.vocab_transform = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+ if self.config.tie_word_embeddings:
+ self.vocab_projector = FlaxDistilBertLMDecoder(
+ self.config,
+ dtype=self.dtype,
+ )
+ else:
+ self.vocab_projector = nn.Dense(
+ self.config.vocab_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ dlbrt_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ return_dict=return_dict,
+ )
+ hidden_states = dlbrt_output[0]
+ prediction_logits = self.vocab_transform(hidden_states)
+ prediction_logits = ACT2FN[self.config.activation](prediction_logits)
+ prediction_logits = self.vocab_layer_norm(prediction_logits)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T)
+ else:
+ prediction_logits = self.vocab_projector(prediction_logits)
+
+ if not return_dict:
+ output = (prediction_logits,) + dlbrt_output[1:]
+ return output
+
+ return FlaxMaskedLMOutput(
+ logits=prediction_logits,
+ hidden_states=dlbrt_output.hidden_states,
+ attentions=dlbrt_output.attentions,
+ )
+
+
+@add_start_docstrings("""DistilBert Model with a `language modeling` head on top.""", FLAX_DISTILBERT_START_DOCSTRING)
+class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForMaskedLMModule
+
+
+append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
+
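+ # Masked-token prediction with the head above, a sketch assuming the standard
+ # `[MASK]` token of the uncased checkpoint:
+ #
+ # >>> from transformers import AutoTokenizer, FlaxDistilBertForMaskedLM
+ # >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+ # >>> model = FlaxDistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
+ # >>> inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="np")
+ # >>> logits = model(**inputs).logits # (bs, seq_length, vocab_size)
+ # >>> mask_index = inputs["input_ids"][0].tolist().index(tokenizer.mask_token_id)
+ # >>> tokenizer.decode(int(logits[0, mask_index].argmax(-1)))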
+
+class FlaxDistilBertForSequenceClassificationModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.pre_classifier = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
+ self.classifier = nn.Dense(
+ self.config.num_labels,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ # Model
+ distilbert_output = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = ACT2FN["relu"](pooled_output)
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output) # (bs, num_labels)
+
+ if not return_dict:
+ return (logits,) + distilbert_output[1:]
+
+ return FlaxSequenceClassifierOutput(
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForSequenceClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForSequenceClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxSequenceClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
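+ # Sequence-classification sketch; the SST-2 fine-tuned checkpoint named elsewhere
+ # in this package is an illustrative choice (pass `from_pt=True` if the checkpoint
+ # only ships PyTorch weights):
+ #
+ # >>> from transformers import AutoTokenizer, FlaxDistilBertForSequenceClassification
+ # >>> name = "distilbert-base-uncased-finetuned-sst-2-english"
+ # >>> tokenizer = AutoTokenizer.from_pretrained(name)
+ # >>> model = FlaxDistilBertForSequenceClassification.from_pretrained(name, from_pt=True)
+ # >>> logits = model(**tokenizer("A delightful movie!", return_tensors="np")).logits
+ # >>> model.config.id2label[logits.argmax(-1).item()]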
+
+class FlaxDistilBertForMultipleChoiceModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.pre_classifier = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
+ self.classifier = nn.Dense(
+ 1,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1]
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
+
+ # Model
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_state = outputs[0]
+ pooled_output = hidden_state[:, 0]
+ pooled_output = self.pre_classifier(pooled_output)
+ pooled_output = ACT2FN["relu"](pooled_output)
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+
+ reshaped_logits = logits.reshape(-1, num_choices)
+
+ if not return_dict:
+ return (reshaped_logits,) + outputs[2:]
+
+ return FlaxMultipleChoiceModelOutput(
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForMultipleChoiceModule
+
+
+overwrite_call_docstring(
+ FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+)
+append_call_sample_docstring(
+ FlaxDistilBertForMultipleChoice,
+ _CHECKPOINT_FOR_DOC,
+ FlaxMultipleChoiceModelOutput,
+ _CONFIG_FOR_DOC,
+)
+
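+ # Multiple-choice inputs arrive as (batch_size, num_choices, seq_length); the
+ # module flattens them to (batch_size * num_choices, seq_length) for the encoder
+ # and reshapes the per-choice scores back. Shape-only sketch:
+ #
+ # >>> import jax.numpy as jnp
+ # >>> input_ids = jnp.zeros((4, 3, 16), dtype="i4") # 4 examples, 3 choices each
+ # >>> input_ids.reshape(-1, input_ids.shape[-1]).shape
+ # (12, 16)
+ # >>> jnp.zeros((12, 1)).reshape(-1, 3).shape # logits back to (bs, num_choices)
+ # (4, 3)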
+
+class FlaxDistilBertForTokenClassificationModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ # Model
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.classifier(hidden_states)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxTokenClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForTokenClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForTokenClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxTokenClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
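+ # Token-classification logits are per position rather than per sequence, i.e.
+ # (bs, seq_length, num_labels). Tag ids via argmax (hypothetical `model`/`inputs`):
+ #
+ # >>> logits = model(**inputs).logits
+ # >>> logits.argmax(-1).shape # (bs, seq_length)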
+
+class FlaxDistilBertForQuestionAnsweringModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
+ assert self.config.num_labels == 2, f"Incorrect number of labels {self.config.num_labels} instead of 2"
+ self.dropout = nn.Dropout(rate=self.config.qa_dropout)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Model
+ distilbert_output = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = distilbert_output[0]
+
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.qa_outputs(hidden_states)
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ if not return_dict:
+ return (start_logits, end_logits) + distilbert_output[1:]
+
+ return FlaxQuestionAnsweringModelOutput(
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForQuestionAnsweringModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForQuestionAnswering,
+ _CHECKPOINT_FOR_DOC,
+ FlaxQuestionAnsweringModelOutput,
+ _CONFIG_FOR_DOC,
+)
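+
+
+ # The QA head scores every token twice (start and end); answer spans are decoded
+ # by an argmax over each set of logits. Shape-only sketch:
+ #
+ # >>> import jax.numpy as jnp
+ # >>> logits = jnp.zeros((2, 16, 2)) # (bs, seq_length, num_labels=2)
+ # >>> start, end = logits.split(2, axis=-1)
+ # >>> start.squeeze(-1).shape, end.squeeze(-1).shape
+ # ((2, 16), (2, 16))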
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..192e25698181047b765ca12c0ad2cd66339bfc9e
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py
@@ -0,0 +1,1145 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ TF 2.0 DistilBERT model
+"""
+
+
+from __future__ import annotations
+
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_distilbert import DistilBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "distilbert-base-uncased",
+ "distilbert-base-uncased-distilled-squad",
+ "distilbert-base-cased",
+ "distilbert-base-cased-distilled-squad",
+ "distilbert-base-multilingual-cased",
+ "distilbert-base-uncased-finetuned-sst-2-english",
+ # See all DistilBERT models at https://huggingface.co/models?filter=distilbert
+]
+
+
+class TFEmbeddings(tf.keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.dim = config.dim
+ self.initializer_range = config.initializer_range
+ self.max_position_embeddings = config.max_position_embeddings
+ self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
+ self.dropout = tf.keras.layers.Dropout(rate=config.dropout)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.dim],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.dim],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.dim])
+
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ final_embeddings = inputs_embeds + position_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+class TFMultiHeadSelfAttention(tf.keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.dropout = tf.keras.layers.Dropout(config.attention_dropout)
+ self.output_attentions = config.output_attentions
+
+ assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not divisible by number of heads {self.n_heads}"
+
+ self.q_lin = tf.keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
+ )
+ self.k_lin = tf.keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
+ )
+ self.v_lin = tf.keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
+ )
+ self.out_lin = tf.keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
+ )
+
+ self.pruned_heads = set()
+ self.config = config
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(self, query, key, value, mask, head_mask, output_attentions, training=False):
+ """
+ Parameters:
+ query: tf.Tensor(bs, seq_length, dim)
+ key: tf.Tensor(bs, seq_length, dim)
+ value: tf.Tensor(bs, seq_length, dim)
+ mask: tf.Tensor(bs, seq_length)
+
+ Returns:
+ context: tf.Tensor(bs, seq_length, dim) Contextualized layer.
+ weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights. Optional: returned only if
+ `output_attentions=True`.
+ """
+ bs, q_length, dim = shape_list(query)
+ k_length = shape_list(key)[1]
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+ dim_per_head = int(self.dim / self.n_heads)
+ dim_per_head = tf.cast(dim_per_head, dtype=tf.int32)
+ mask_reshape = [bs, 1, 1, k_length]
+
+ def shape(x):
+ """separate heads"""
+ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
+
+ def unshape(x):
+ """group heads"""
+ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
+ q = tf.cast(q, dtype=tf.float32)
+ q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32)))
+ k = tf.cast(k, dtype=q.dtype)
+ scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length)
+ mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
+ # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)
+
+ mask = tf.cast(mask, dtype=scores.dtype)
+ scores = scores - 1e30 * (1.0 - mask)
+ weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
+ weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ weights = weights * head_mask
+
+ context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
+ context = unshape(context) # (bs, q_length, dim)
+ context = self.out_lin(context) # (bs, q_length, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_lin", None) is not None:
+ with tf.name_scope(self.q_lin.name):
+ self.q_lin.build([None, None, self.config.dim])
+ if getattr(self, "k_lin", None) is not None:
+ with tf.name_scope(self.k_lin.name):
+ self.k_lin.build([None, None, self.config.dim])
+ if getattr(self, "v_lin", None) is not None:
+ with tf.name_scope(self.v_lin.name):
+ self.v_lin.build([None, None, self.config.dim])
+ if getattr(self, "out_lin", None) is not None:
+ with tf.name_scope(self.out_lin.name):
+ self.out_lin.build([None, None, self.config.dim])
+
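+ # The subtraction above replaces torch's in-place `masked_fill_`: masked positions
+ # (mask == 0) receive a very large negative score and vanish after the softmax.
+ # Tiny numeric sketch:
+ #
+ # >>> import tensorflow as tf
+ # >>> scores = tf.constant([[1.0, 2.0, 3.0]])
+ # >>> mask = tf.constant([[1.0, 1.0, 0.0]])
+ # >>> tf.nn.softmax(scores - 1e30 * (1.0 - mask), axis=-1).numpy().round(2)
+ # array([[0.27, 0.73, 0.  ]], dtype=float32)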
+
+class TFFFN(tf.keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dropout = tf.keras.layers.Dropout(config.dropout)
+ self.lin1 = tf.keras.layers.Dense(
+ config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
+ )
+ self.lin2 = tf.keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
+ )
+ self.activation = get_tf_activation(config.activation)
+ self.config = config
+
+ def call(self, input, training=False):
+ x = self.lin1(input)
+ x = self.activation(x)
+ x = self.lin2(x)
+ x = self.dropout(x, training=training)
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "lin1", None) is not None:
+ with tf.name_scope(self.lin1.name):
+ self.lin1.build([None, None, self.config.dim])
+ if getattr(self, "lin2", None) is not None:
+ with tf.name_scope(self.lin2.name):
+ self.lin2.build([None, None, self.config.hidden_dim])
+
+
+class TFTransformerBlock(tf.keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.hidden_dim = config.hidden_dim
+ self.dropout = tf.keras.layers.Dropout(config.dropout)
+ self.activation = config.activation
+ self.output_attentions = config.output_attentions
+
+ assert (
+ config.dim % config.n_heads == 0
+ ), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}"
+
+ self.attention = TFMultiHeadSelfAttention(config, name="attention")
+ self.sa_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
+
+ self.ffn = TFFFN(config, name="ffn")
+ self.output_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
+ self.config = config
+
+ def call(self, x, attn_mask, head_mask, output_attentions, training=False): # removed: src_enc=None, src_len=None
+ """
+ Parameters:
+ x: tf.Tensor(bs, seq_length, dim)
+ attn_mask: tf.Tensor(bs, seq_length)
+
+ Outputs:
+ sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights.
+ ffn_output: tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization.
+ """
+ # Self-Attention
+ sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training)
+ if output_attentions:
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
+ else: # when `output_attentions` is False, the attention layer returned a 1-tuple
+ # assert type(sa_output) == tuple
+ sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim)
+ ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
+
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "sa_layer_norm", None) is not None:
+ with tf.name_scope(self.sa_layer_norm.name):
+ self.sa_layer_norm.build([None, None, self.config.dim])
+ if getattr(self, "ffn", None) is not None:
+ with tf.name_scope(self.ffn.name):
+ self.ffn.build(None)
+ if getattr(self, "output_layer_norm", None) is not None:
+ with tf.name_scope(self.output_layer_norm.name):
+ self.output_layer_norm.build([None, None, self.config.dim])
+
+
+class TFTransformer(tf.keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.n_layers = config.n_layers
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+
+ self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)]
+
+ def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
+ # docstyle-ignore
+ """
+ Parameters:
+ x: tf.Tensor(bs, seq_length, dim) Input sequence embedded.
+ attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence.
+
+ Returns:
+ hidden_state: tf.Tensor(bs, seq_length, dim)
+ Sequence of hidden states in the last (top) layer
+ all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
+ Tuple of length n_layers with the hidden states from each layer.
+ Optional: only if output_hidden_states=True
+ all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
+ Tuple of length n_layers with the attention weights from each layer
+ Optional: only if output_attentions=True
+ """
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_state = x
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training)
+ hidden_state = layer_outputs[-1]
+
+ if output_attentions:
+ assert len(layer_outputs) == 2
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1"
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
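+ # With `output_hidden_states=True` the loop records the input to every block and
+ # then the final output, so the returned tuple has n_layers + 1 entries, the first
+ # being the embedding output. Illustrative check (hypothetical `model`/`inputs`):
+ #
+ # >>> outputs = model(inputs, output_hidden_states=True)
+ # >>> len(outputs.hidden_states) == model.config.n_layers + 1
+ # True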
+
+@keras_serializable
+class TFDistilBertMainLayer(tf.keras.layers.Layer):
+ config_class = DistilBertConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.num_hidden_layers = config.num_hidden_layers
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+
+ self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings
+ self.transformer = TFTransformer(config, name="transformer") # Encoder
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = value.shape[0]
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ head_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.ones(input_shape) # (bs, seq_length)
+
+ attention_mask = tf.cast(attention_mask, dtype=tf.float32)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.num_hidden_layers
+
+ embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim)
+ tfmr_output = self.transformer(
+ embedding_output,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=training,
+ )
+
+ return tfmr_output # last-layer hidden-state, (all hidden_states), (all attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
+class TFDistilBertPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ base_model_prefix = "distilbert"
+
+
+DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
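+ # The three input formats described above, side by side (hypothetical tensors):
+ #
+ # >>> outputs = model(input_ids) # single tensor
+ # >>> outputs = model([input_ids, attention_mask]) # list, docstring order
+ # >>> outputs = model({"input_ids": input_ids, "attention_mask": attention_mask})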
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertModel(TFDistilBertPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Embeddings
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ outputs = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+
+
+class TFDistilBertLMHead(tf.keras.layers.Layer):
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.dim = config.dim
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ super().build(input_shape)
+
+ def get_output_embeddings(self):
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim])
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
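+ # The LM head ties its output weights to the input embedding matrix: scores are
+ # `hidden_states @ weight^T` plus a per-token bias, which is exactly the
+ # `transpose_b` matmul above. Shape-only sketch:
+ #
+ # >>> import tensorflow as tf
+ # >>> hidden = tf.zeros((16, 768)) # (bs * seq_length, dim)
+ # >>> weight = tf.zeros((30522, 768)) # (vocab_size, dim)
+ # >>> tf.matmul(hidden, weight, transpose_b=True).shape
+ # TensorShape([16, 30522])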
+
+@add_start_docstrings(
+ """DistilBert Model with a `masked language modeling` head on top.""",
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.config = config
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.vocab_transform = tf.keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
+ )
+ self.act = get_tf_activation(config.activation)
+ self.vocab_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
+ self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")
+
+ def get_lm_head(self):
+ return self.vocab_projector
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.vocab_projector.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = distilbert_output[0] # (bs, seq_length, dim)
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
+ prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_projector(prediction_logits)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits)
+
+ if not return_dict:
+ output = (prediction_logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "vocab_transform", None) is not None:
+ with tf.name_scope(self.vocab_transform.name):
+ self.vocab_transform.build([None, None, self.config.dim])
+ if getattr(self, "vocab_layer_norm", None) is not None:
+ with tf.name_scope(self.vocab_layer_norm.name):
+ self.vocab_layer_norm.build([None, None, self.config.dim])
+ if getattr(self, "vocab_projector", None) is not None:
+ with tf.name_scope(self.vocab_projector.name):
+ self.vocab_projector.build(None)
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.pre_classifier = tf.keras.layers.Dense(
+ config.dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="relu",
+ name="pre_classifier",
+ )
+ self.classifier = tf.keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout)
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
+ logits = self.classifier(pooled_output) # (bs, num_labels)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "pre_classifier", None) is not None:
+ with tf.name_scope(self.pre_classifier.name):
+ self.pre_classifier.build([None, None, self.config.dim])
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.dim])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.dropout = tf.keras.layers.Dropout(config.dropout)
+ self.classifier = tf.keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout)
+ self.pre_classifier = tf.keras.layers.Dense(
+ config.dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="relu",
+ name="pre_classifier",
+ )
+ self.classifier = tf.keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+ distilbert_output = self.distilbert(
+ flat_input_ids,
+ flat_attention_mask,
+ head_mask,
+ flat_inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "pre_classifier", None) is not None:
+ with tf.name_scope(self.pre_classifier.name):
+ self.pre_classifier.build([None, None, self.config.dim])
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.dim])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.qa_outputs = tf.keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2"
+ self.dropout = tf.keras.layers.Dropout(config.qa_dropout)
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
+ hidden_states = self.dropout(hidden_states, training=training) # (bs, max_query_len, dim)
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.dim])
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..014c41d1243b6f7d29a02b7bc7aa2d7a1c6dbd60
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py
@@ -0,0 +1,553 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for DistilBERT."""
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
+ "distilbert-base-uncased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
+ ),
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
+ "distilbert-base-cased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
+ ),
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
+ "distilbert-base-multilingual-cased": (
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
+ ),
+ }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "distilbert-base-uncased": 512,
+ "distilbert-base-uncased-distilled-squad": 512,
+ "distilbert-base-cased": 512,
+ "distilbert-base-cased-distilled-squad": 512,
+ "distilbert-base-german-cased": 512,
+ "distilbert-base-multilingual-cased": 512,
+}
+
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "distilbert-base-uncased": {"do_lower_case": True},
+ "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
+ "distilbert-base-cased": {"do_lower_case": False},
+ "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
+ "distilbert-base-german-cased": {"do_lower_case": False},
+ "distilbert-base-multilingual-cased": {"do_lower_case": False},
+}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class DistilBertTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a DistilBERT tokenizer. Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
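+
+ Examples (a minimal usage sketch; the exact subwords depend on the checkpoint's vocabulary):
+
+ ```python
+ >>> from transformers import DistilBertTokenizer
+
+ >>> tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
+ >>> tokenizer.tokenize("Hello, unaffable world!")
+ ['hello', ',', 'un', '##aff', '##able', 'world', '!']
+ ```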
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.vocab)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
+ def _tokenize(self, text, split_special_tokens=False):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
+ ):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
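+
+ Example (illustrative; assumes the uncased vocabulary, where `[CLS]` has id 101 and `[SEP]` has id 102):
+
+ ```python
+ >>> tokenizer.build_inputs_with_special_tokens([7592], [2088])
+ [101, 7592, 102, 2088, 102]
+ ```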
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*):
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set containing the elements of both sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF)
+ or (cp >= 0x20000 and cp <= 0x2A6DF)
+ or (cp >= 0x2A700 and cp <= 0x2B73F)
+ or (cp >= 0x2B740 and cp <= 0x2B81F)
+ or (cp >= 0x2B820 and cp <= 0x2CEAF)
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F)
+ ):
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..adb90f857d75fef338b71975f27a4d9fcb521b4e
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py
@@ -0,0 +1,231 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for DistilBERT."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_distilbert import DistilBertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
+ "distilbert-base-uncased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
+ ),
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
+ "distilbert-base-cased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
+ ),
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
+ "distilbert-base-multilingual-cased": (
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
+ ),
+ },
+ "tokenizer_file": {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
+ "distilbert-base-uncased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
+ ),
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
+ "distilbert-base-cased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
+ ),
+ "distilbert-base-german-cased": (
+ "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
+ ),
+ "distilbert-base-multilingual-cased": (
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
+ ),
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "distilbert-base-uncased": 512,
+ "distilbert-base-uncased-distilled-squad": 512,
+ "distilbert-base-cased": 512,
+ "distilbert-base-cased-distilled-squad": 512,
+ "distilbert-base-german-cased": 512,
+ "distilbert-base-multilingual-cased": 512,
+}
+
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "distilbert-base-uncased": {"do_lower_case": True},
+ "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
+ "distilbert-base-cased": {"do_lower_case": False},
+ "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
+ "distilbert-base-german-cased": {"do_lower_case": False},
+ "distilbert-base-multilingual-cased": {"do_lower_case": False},
+}
+
+
+class DistilBertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+ whitespace characters with the classic space.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
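+
+ Examples (a minimal usage sketch; the ids shown assume the uncased vocabulary):
+
+ ```python
+ >>> from transformers import DistilBertTokenizerFast
+
+ >>> tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
+ >>> tokenizer("Hello world")["input_ids"]
+ [101, 7592, 2088, 102]
+ ```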
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = DistilBertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
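+ # Keep the backend normalizer consistent with the options passed in here:
+ # if the serialized tokenizer.json was created with different lowercasing,
+ # accent-stripping or Chinese-character handling, rebuild its normalizer.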
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c974d994eca0322462ec7d97ce96728c9cb4ba24
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+
+
+_import_structure = {"tokenization_phobert": ["PhobertTokenizer"]}
+
+
+if TYPE_CHECKING:
+ from .tokenization_phobert import PhobertTokenizer
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1d3c1b73e653948122fc80e3df6d2818c45909b
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/tokenization_phobert.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/tokenization_phobert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9567ba152999b8e48a0db0cc00f60db372cbc6c
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/phobert/__pycache__/tokenization_phobert.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/phobert/tokenization_phobert.py b/llava_next/lib/python3.10/site-packages/transformers/models/phobert/tokenization_phobert.py
new file mode 100644
index 0000000000000000000000000000000000000000..1275947776d46360c690a7aabe262652a80eef51
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/phobert/tokenization_phobert.py
@@ -0,0 +1,367 @@
+# coding=utf-8
+# Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
+# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization classes for PhoBERT"""
+
+
+import os
+import re
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.txt",
+ "merges_file": "bpe.codes",
+}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
+ "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
+ },
+ "merges_file": {
+ "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
+ "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "vinai/phobert-base": 256,
+ "vinai/phobert-large": 256,
+}
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
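+
+ Example (illustrative): `get_pairs(("l", "o", "w"))` returns `{("l", "o"), ("o", "w")}`.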
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+
+ return pairs
+
+
+class PhobertTokenizer(PreTrainedTokenizer):
+ """
+ Construct a PhoBERT tokenizer. Based on Byte-Pair-Encoding.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
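+
+ Examples (a minimal usage sketch; PhoBERT expects word-segmented Vietnamese input):
+
+ ```python
+ >>> from transformers import PhobertTokenizer
+
+ >>> tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
+ >>> line = "Tôi là sinh_viên trường đại_học Công_nghệ ."
+ >>> input_ids = tokenizer(line, return_tensors="pt")["input_ids"]
+ ```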
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ **kwargs,
+ ):
+ self.vocab_file = vocab_file
+ self.merges_file = merges_file
+
+ self.encoder = {}
+ self.encoder[str(bos_token)] = 0
+ self.encoder[str(pad_token)] = 1
+ self.encoder[str(eos_token)] = 2
+ self.encoder[str(unk_token)] = 3
+
+ self.add_from_file(vocab_file)
+
+ self.decoder = {v: k for k, v in self.encoder.items()}
+
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ merges = merges_handle.read().split("\n")[:-1]
+ merges = [tuple(merge.split()[:-1]) for merge in merges]
+
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {}
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ **kwargs,
+ )
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A PhoBERT sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
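+
+ Example (illustrative; with the default vocabulary built in `__init__`, `<s>` has id 0 and `</s>` has id 2):
+
+ ```python
+ >>> tokenizer.build_inputs_with_special_tokens([10], [20])
+ [0, 10, 2, 2, 20, 2]
+ ```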
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
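+ # the "</w>" suffix marks the end of a word so that learned BPE merges
+ # cannot cross word boundaries; it is stripped again after merging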
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = "@@ ".join(word)
+ word = word[:-4]
+ self.cache[token] = word
+ return word
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ split_tokens = []
+
+ words = re.findall(r"\S+\n?", text)
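+ # PhoBERT operates on whitespace-separated, pre-segmented Vietnamese text
+ # (multi-syllable words joined with "_"), so each chunk is one word for BPE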
+
+ for token in words:
+ split_tokens.extend(list(self.bpe(token).split(" ")))
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ out_merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
+ copyfile(self.merges_file, out_merge_file)
+
+ return out_vocab_file, out_merge_file
+
+ # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
+ # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
+ # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
+ # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
+ # return ''.join(tokens_generated_so_far)
+
+ def add_from_file(self, f):
+ """
+ Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
+ """
+ if isinstance(f, str):
+ try:
+ with open(f, "r", encoding="utf-8") as fd:
+ self.add_from_file(fd)
+ except FileNotFoundError as fnfe:
+ raise fnfe
+ except UnicodeError:
+ raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
+ return
+
+ lines = f.readlines()
+ for lineTmp in lines:
+ line = lineTmp.strip()
+ idx = line.rfind(" ")
+ if idx == -1:
+ raise ValueError("Incorrect dictionary format, expected ' '")
+ word = line[:idx]
+ self.encoder[word] = len(self.encoder)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de416d6bfb3c0dfdc53607bcdeea74854d57a0b6
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7af3832eda8ba9dab4d6e283fbbd88c06a5b5bda
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b3c8aa459228e6c5b6c2c043df6611c7cb60fcf
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py b/llava_next/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..76f6231ec28fbb50bac5ee8b17268a23bbdbbc14
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py
@@ -0,0 +1,666 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""RAG Retriever model implementation."""
+
+import os
+import pickle
+import time
+from typing import Iterable, List, Optional, Tuple
+
+import numpy as np
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...tokenization_utils_base import BatchEncoding
+from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
+from .configuration_rag import RagConfig
+from .tokenization_rag import RagTokenizer
+
+
+if is_datasets_available():
+ from datasets import Dataset, load_dataset, load_from_disk
+
+if is_faiss_available():
+ import faiss
+
+
+logger = logging.get_logger(__name__)
+
+
+LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/"
+
+
+class Index:
+ """
+ A base class for the Indices encapsulated by the [`RagRetriever`].
+ """
+
+ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
+ """
+ Returns a list of dictionaries, containing titles and text of the retrieved documents.
+
+ Args:
+ doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):
+ A tensor of document indices.
+ """
+ raise NotImplementedError
+
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
+ """
+ For each query in the batch, retrieves `n_docs` documents.
+
+ Args:
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
+ An array of query vectors.
+ n_docs (`int`):
+ The number of docs retrieved per query.
+
+ Returns:
+ `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of
+ shape `(batch_size, n_docs, vector_size)`: A tensor of vector representations of retrieved documents.
+ """
+ raise NotImplementedError
+
+ def is_initialized(self):
+ """
+ Returns `True` if index is already initialized.
+ """
+ raise NotImplementedError
+
+ def init_index(self):
+ """
+ A function responsible for loading the index into memory. Should be called only once per training run of a RAG
+ model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load
+ the index.
+ """
+ raise NotImplementedError
+
+
+class LegacyIndex(Index):
+ """
+ An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use
+ default faiss index parameters as specified in that repository.
+
+ Args:
+ vector_size (`int`):
+ The dimension of indexed vectors.
+ index_path (`str`):
+ A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`]
+ """
+
+ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index"
+ PASSAGE_FILENAME = "psgs_w100.tsv.pkl"
+
+ def __init__(self, vector_size, index_path):
+ self.index_id_to_db_id = []
+ self.index_path = index_path
+ self.passages = self._load_passages()
+ self.vector_size = vector_size
+ self.index = None
+ self._index_initialized = False
+
+ def _resolve_path(self, index_path, filename):
+ is_local = os.path.isdir(index_path)
+ try:
+ # Load from URL or cache if already cached
+ resolved_archive_file = cached_file(index_path, filename)
+ except EnvironmentError:
+ msg = (
+ f"Can't load '{filename}'. Make sure that:\n\n"
+ f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n"
+ f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n"
+ )
+ raise EnvironmentError(msg)
+ if is_local:
+ logger.info(f"loading file {resolved_archive_file}")
+ else:
+ logger.info(f"loading file {filename} from cache at {resolved_archive_file}")
+ return resolved_archive_file
+
+ def _load_passages(self):
+ logger.info(f"Loading passages from {self.index_path}")
+ passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME)
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
+ raise ValueError(
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
+ )
+ with open(passages_path, "rb") as passages_file:
+ passages = pickle.load(passages_file)
+ return passages
+
+ def _deserialize_index(self):
+ logger.info(f"Loading index from {self.index_path}")
+ resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr")
+ self.index = faiss.read_index(resolved_index_path)
+ resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr")
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
+ raise ValueError(
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
+ )
+ with open(resolved_meta_path, "rb") as metadata_file:
+ self.index_id_to_db_id = pickle.load(metadata_file)
+ assert (
+ len(self.index_id_to_db_id) == self.index.ntotal
+ ), "Deserialized index_id_to_db_id should match faiss index size"
+
+ def is_initialized(self):
+ return self._index_initialized
+
+ def init_index(self):
+ index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)
+ index.hnsw.efSearch = 128
+ index.hnsw.efConstruction = 200
+ self.index = index
+ self._deserialize_index()
+ self._index_initialized = True
+
+ def get_doc_dicts(self, doc_ids: np.ndarray):
+ doc_list = []
+ for doc_ids_i in doc_ids:
+ ids = [str(int(doc_id)) for doc_id in doc_ids_i]
+ docs = [self.passages[doc_id] for doc_id in ids]
+ doc_list.append(docs)
+ doc_dicts = []
+ for docs in doc_list:
+ doc_dict = {}
+ doc_dict["title"] = [doc[1] for doc in docs]
+ doc_dict["text"] = [doc[0] for doc in docs]
+ doc_dicts.append(doc_dict)
+ return doc_dicts
+
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
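+ # The legacy DPR index turns maximum inner product search into an L2
+ # search over vectors carrying one extra auxiliary dimension, so each
+ # query is padded with a zero component before searching the HNSW index.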
+ aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1)
+ query_hnsw_vectors = np.hstack((question_hidden_states, aux_dim))
+ _, docs_ids = self.index.search(query_hnsw_vectors, n_docs)
+ vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids]
+ ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
+ return np.array(ids), np.array(vectors)
+
+
+class HFIndexBase(Index):
+ def __init__(self, vector_size, dataset, index_initialized=False):
+ self.vector_size = vector_size
+ self.dataset = dataset
+ self._index_initialized = index_initialized
+ self._check_dataset_format(with_index=index_initialized)
+ dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32")
+
+ def _check_dataset_format(self, with_index: bool):
+ if not isinstance(self.dataset, Dataset):
+ raise ValueError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}")
+ if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0:
+ raise ValueError(
+ "Dataset should be a dataset with the following columns: "
+ "title (str), text (str) and embeddings (arrays of dimension vector_size), "
+ f"but got columns {self.dataset.column_names}"
+ )
+ if with_index and "embeddings" not in self.dataset.list_indexes():
+ raise ValueError(
+ "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it "
+ "or `dataset.load_faiss_index` to load one from the disk."
+ )
+
+ def init_index(self):
+ raise NotImplementedError()
+
+ def is_initialized(self):
+ return self._index_initialized
+
+ def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
+ return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]
+
+ def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
+ _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs)
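+ # faiss returns -1 for queries with fewer than n_docs hits; drop those
+ # ids here and zero-pad the corresponding vectors below so shapes stay fixed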
+ docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
+ vectors = [doc["embeddings"] for doc in docs]
+ for i in range(len(vectors)):
+ if len(vectors[i]) < n_docs:
+ vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
+ return np.array(ids), np.array(vectors) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
+
+
+class CanonicalHFIndex(HFIndexBase):
+ """
+ A wrapper around an instance of [`~datasets.Dataset`]. If `index_path` is set to `None`, we load the pre-computed
+ index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path
+ on disk.
+
+ Args:
+ vector_size (`int`): the dimension of the passages embeddings used by the index
+ dataset_name (`str`, *optional*, defaults to `wiki_dpr`):
+ A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
+ with `datasets.list_datasets()`).
+ dataset_split (`str`, *optional*, defaults to `train`):
+ Which split of the `dataset` to load.
+ index_name (`str`, *optional*):
+ The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved
+ under this name.
+ index_path (`str`, *optional*, defaults to `None`):
+ The path to the serialized faiss index on disk.
+ use_dummy_dataset (`bool`, *optional*, defaults to `False`):
+ If `True`, use the dummy configuration of the dataset for tests.
+ """
+
+ def __init__(
+ self,
+ vector_size: int,
+ dataset_name: str = "wiki_dpr",
+ dataset_split: str = "train",
+ index_name: Optional[str] = None,
+ index_path: Optional[str] = None,
+ use_dummy_dataset=False,
+ ):
+ if int(index_path is None) + int(index_name is None) != 1:
+ raise ValueError("Please provide exactly one of `index_name` or `index_path`.")
+ self.dataset_name = dataset_name
+ self.dataset_split = dataset_split
+ self.index_name = index_name
+ self.index_path = index_path
+ self.use_dummy_dataset = use_dummy_dataset
+ logger.info(f"Loading passages from {self.dataset_name}")
+ dataset = load_dataset(
+ self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset
+ )
+ super().__init__(vector_size, dataset, index_initialized=False)
+
+ def init_index(self):
+ if self.index_path is not None:
+ logger.info(f"Loading index from {self.index_path}")
+ self.dataset.load_faiss_index("embeddings", file=self.index_path)
+ else:
+ logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}")
+ self.dataset = load_dataset(
+ self.dataset_name,
+ with_embeddings=True,
+ with_index=True,
+ split=self.dataset_split,
+ index_name=self.index_name,
+ dummy=self.use_dummy_dataset,
+ )
+ self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True)
+ self._index_initialized = True
+
+
+class CustomHFIndex(HFIndexBase):
+ """
+ A wrapper around an instance of [`~datasets.Dataset`]. The dataset and the index are both loaded from the
+ indicated paths on disk.
+
+ Args:
+ vector_size (`int`): the dimension of the passage embeddings used by the index
+ dataset_path (`str`):
+ The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
+ embeddings (arrays of dimension vector_size)
+ index_path (`str`):
+ The path to the serialized faiss index on disk.
+ """
+
+ def __init__(self, vector_size: int, dataset, index_path=None):
+ super().__init__(vector_size, dataset, index_initialized=index_path is None)
+ self.index_path = index_path
+
+ @classmethod
+ def load_from_disk(cls, vector_size, dataset_path, index_path):
+ logger.info(f"Loading passages from {dataset_path}")
+ if dataset_path is None or index_path is None:
+ raise ValueError(
+ "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` "
+ "and `dataset.get_index('embeddings').save(index_path)`."
+ )
+ dataset = load_from_disk(dataset_path)
+ return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)
+
+ def init_index(self):
+ if not self.is_initialized():
+ logger.info(f"Loading index from {self.index_path}")
+ self.dataset.load_faiss_index("embeddings", file=self.index_path)
+ self._index_initialized = True
+
+
+class RagRetriever:
+ """
+ Retriever used to get documents from vector queries. It retrieves the document embeddings as well as the document
+ contents, and formats them to be used with a RagModel.
+
+ Args:
+ config ([`RagConfig`]):
+ The configuration of the RAG model this Retriever is used with. Contains parameters indicating which
+ `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical
+ one (default) from the datasets library with `config.index_name="wiki_dpr"` for example.
+ question_encoder_tokenizer ([`PreTrainedTokenizer`]):
+ The tokenizer that was used to tokenize the question. It is used to decode the question and then use the
+ generator_tokenizer.
+ generator_tokenizer ([`PreTrainedTokenizer`]):
+ The tokenizer used for the generator part of the RagModel.
+ index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration):
+ If specified, use this index instead of the one built using the configuration
+
+ Examples:
+
+ ```python
+ >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact')
+ >>> from transformers import RagRetriever
+
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed"
+ ... )
+
+ >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py
+ >>> from transformers import RagRetriever
+
+ >>> dataset = (
+ ... ...
+ ... ) # dataset must be a datasets.Dataset object with columns "title", "text" and "embeddings", and it must have a faiss index
+ >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset)
+
+ >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py
+ >>> from transformers import RagRetriever
+
+ >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)*
+ >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)*
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/dpr-ctx_encoder-single-nq-base",
+ ... index_name="custom",
+ ... passages_path=dataset_path,
+ ... index_path=index_path,
+ ... )
+
+ >>> # To load the legacy index built originally for Rag's paper
+ >>> from transformers import RagRetriever
+
+ >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy")
+ ```"""
+
+ def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
+ self._init_retrieval = init_retrieval
+ requires_backends(self, ["datasets", "faiss"])
+ super().__init__()
+ self.index = index or self._build_index(config)
+ self.generator_tokenizer = generator_tokenizer
+ self.question_encoder_tokenizer = question_encoder_tokenizer
+
+ self.n_docs = config.n_docs
+ self.batch_size = config.retrieval_batch_size
+
+ self.config = config
+ if self._init_retrieval:
+ self.init_retrieval()
+
+ self.ctx_encoder_tokenizer = None
+ self.return_tokenized_docs = False
+
+ @staticmethod
+ def _build_index(config):
+ if config.index_name == "legacy":
+ return LegacyIndex(
+ config.retrieval_vector_size,
+ config.index_path or LEGACY_INDEX_PATH,
+ )
+ elif config.index_name == "custom":
+ return CustomHFIndex.load_from_disk(
+ vector_size=config.retrieval_vector_size,
+ dataset_path=config.passages_path,
+ index_path=config.index_path,
+ )
+ else:
+ return CanonicalHFIndex(
+ vector_size=config.retrieval_vector_size,
+ dataset_name=config.dataset,
+ dataset_split=config.dataset_split,
+ index_name=config.index_name,
+ index_path=config.index_path,
+ use_dummy_dataset=config.use_dummy_dataset,
+ )
+
+ @classmethod
+ def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
+ requires_backends(cls, ["datasets", "faiss"])
+ config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
+ rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
+ question_encoder_tokenizer = rag_tokenizer.question_encoder
+ generator_tokenizer = rag_tokenizer.generator
+ if indexed_dataset is not None:
+ config.index_name = "custom"
+ index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
+ else:
+ index = cls._build_index(config)
+ return cls(
+ config,
+ question_encoder_tokenizer=question_encoder_tokenizer,
+ generator_tokenizer=generator_tokenizer,
+ index=index,
+ )
+
+ def save_pretrained(self, save_directory):
+ if isinstance(self.index, CustomHFIndex):
+ if self.config.index_path is None:
+ index_path = os.path.join(save_directory, "hf_dataset_index.faiss")
+ self.index.dataset.get_index("embeddings").save(index_path)
+ self.config.index_path = index_path
+ if self.config.passages_path is None:
+ passages_path = os.path.join(save_directory, "hf_dataset")
+ # datasets don't support save_to_disk with indexes right now
+ faiss_index = self.index.dataset._indexes.pop("embeddings")
+ self.index.dataset.save_to_disk(passages_path)
+ self.index.dataset._indexes["embeddings"] = faiss_index
+ self.config.passages_path = passages_path
+ self.config.save_pretrained(save_directory)
+ rag_tokenizer = RagTokenizer(
+ question_encoder=self.question_encoder_tokenizer,
+ generator=self.generator_tokenizer,
+ )
+ rag_tokenizer.save_pretrained(save_directory)
+
+ def init_retrieval(self):
+ """
+ Retriever initialization function. It loads the index into memory.
+ """
+
+ logger.info("initializing retrieval")
+ self.index.init_index()
+
+ def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
+ r"""
+ Postprocessing retrieved `docs` and combining them with `input_strings`.
+
+ Args:
+ docs (`dict`):
+ Retrieved documents.
+ input_strings (`str`):
+ Input strings decoded by `preprocess_query`.
+ prefix (`str`):
+ Prefix added at the beginning of each input, typically used with T5-based models.
+
+ Return:
+ `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible
+ `attention_mask`.
+ """
+
+ def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
+ # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation
+ # TODO(piktus): better handling of truncation
+ if doc_title.startswith('"'):
+ doc_title = doc_title[1:]
+ if doc_title.endswith('"'):
+ doc_title = doc_title[:-1]
+ if prefix is None:
+ prefix = ""
+ out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace(
+ "  ", " "
+ )
+ return out
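+ # For illustration, with RagConfig's default separators (title_sep=" / ",
+ # doc_sep=" // ") and no prefix:
+ #   cat_input_and_doc("Aaron", "Aaron is a prophet.", "who is aaron?", None)
+ #   returns "Aaron / Aaron is a prophet. // who is aaron?"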
+
+ rag_input_strings = [
+ cat_input_and_doc(
+ docs[i]["title"][j],
+ docs[i]["text"][j],
+ input_strings[i],
+ prefix,
+ )
+ for i in range(len(docs))
+ for j in range(n_docs)
+ ]
+
+ contextualized_inputs = self.generator_tokenizer.batch_encode_plus(
+ rag_input_strings,
+ max_length=self.config.max_combined_length,
+ return_tensors=return_tensors,
+ padding="max_length",
+ truncation=True,
+ )
+
+ return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"]
+
+ def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]:
+ return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]
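+ # e.g. chunk_size=2: [q0, q1, q2, q3, q4] -> [[q0, q1], [q2, q3], [q4]]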
+
+ def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]:
+ question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
+ ids_batched = []
+ vectors_batched = []
+ for question_hidden_states in question_hidden_states_batched:
+ start_time = time.time()
+ ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs)
+ logger.debug(
+ f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}"
+ )
+ ids_batched.extend(ids)
+ vectors_batched.extend(vectors)
+ return (
+ np.array(ids_batched),
+ np.array(vectors_batched),
+ ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
+
+ def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]:
+ """
+ Retrieves documents for specified `question_hidden_states`.
+
+ Args:
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
+ A batch of query vectors to retrieve with.
+ n_docs (`int`):
+ The number of docs retrieved per query.
+
+ Return:
+ `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects:
+
+ - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings
+ of the retrieved docs per query.
+ - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index.
+ - **doc_dicts** (`List[dict]`) -- The `title` and `text` of the retrieved docs per query.
+ """
+
+ doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
+ return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
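+ # A minimal sketch (surrounding setup assumed): with a DPR question encoder,
+ #   question_hidden_states = question_encoder(input_ids)[0].detach().numpy()
+ #   doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)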
+
+ def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
+ # used in end2end retriever training
+ self.ctx_encoder_tokenizer = ctx_encoder_tokenizer
+ self.return_tokenized_docs = True
+
+ def __call__(
+ self,
+ question_input_ids: List[List[int]],
+ question_hidden_states: np.ndarray,
+ prefix=None,
+ n_docs=None,
+ return_tensors=None,
+ ) -> BatchEncoding:
+ """
+ Retrieves documents for specified `question_hidden_states`.
+
+ Args:
+ question_input_ids (`List[List[int]]`):
+ A batch of input ids.
+ question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
+ A batch of query vectors to retrieve with.
+ prefix (`str`, *optional*):
+ The prefix used by the generator's tokenizer.
+ n_docs (`int`, *optional*):
+ The number of docs retrieved per query.
+ return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+
+ Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **context_input_ids** -- List of token ids to be fed to a model.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model
+ (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents
+ - **doc_ids** -- List of ids of the retrieved documents
+ """
+
+ n_docs = n_docs if n_docs is not None else self.n_docs
+ prefix = prefix if prefix is not None else self.config.generator.prefix
+ retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)
+
+ input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
+ context_input_ids, context_attention_mask = self.postprocess_docs(
+ docs, input_strings, prefix, n_docs, return_tensors=return_tensors
+ )
+
+ if self.return_tokenized_docs:
+ retrieved_doc_text = []
+ retrieved_doc_title = []
+
+ for b_idx in range(len(docs)):
+ for doc_idx in range(n_docs):
+ retrieved_doc_text.append(docs[b_idx]["text"][doc_idx])
+ retrieved_doc_title.append(docs[b_idx]["title"][doc_idx])
+
+ tokenized_docs = self.ctx_encoder_tokenizer(
+ retrieved_doc_title,
+ retrieved_doc_text,
+ truncation=True,
+ padding="longest",
+ return_tensors=return_tensors,
+ )
+
+ return BatchEncoding(
+ {
+ "context_input_ids": context_input_ids,
+ "context_attention_mask": context_attention_mask,
+ "retrieved_doc_embeds": retrieved_doc_embeds,
+ "doc_ids": doc_ids,
+ "tokenized_doc_ids": tokenized_docs["input_ids"],
+ "tokenized_doc_attention_mask": tokenized_docs["attention_mask"],
+ },
+ tensor_type=return_tensors,
+ )
+
+ else:
+ return BatchEncoding(
+ {
+ "context_input_ids": context_input_ids,
+ "context_attention_mask": context_attention_mask,
+ "retrieved_doc_embeds": retrieved_doc_embeds,
+ "doc_ids": doc_ids,
+ },
+ tensor_type=return_tensors,
+ )
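+
+# End-to-end sketch (setup assumed, for illustration only):
+#   retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
+#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
+#   question_hidden_states = question_encoder(inputs["input_ids"])[0]
+#   out = retriever(inputs["input_ids"].numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
+#   # out["context_input_ids"] and out["context_attention_mask"] are fed to the generator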
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_original_to_hf_format.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_original_to_hf_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f75a5d8c5e4ceddb60f9a54ce018e0c617d3c473
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_original_to_hf_format.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sew/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd43be68b7c0533dd7b20c8d11cb401f298c4f58
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__init__.py
@@ -0,0 +1,56 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_sew"] = [
+ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "SEWForCTC",
+ "SEWForSequenceClassification",
+ "SEWModel",
+ "SEWPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_sew import (
+ SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
+ SEWForCTC,
+ SEWForSequenceClassification,
+ SEWModel,
+ SEWPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b63525d4ce87f32c0c6a1f4be2430e94210ad67
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/configuration_sew.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/configuration_sew.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8120d8359622163388e1e5333db2e56389798ed0
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/configuration_sew.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/convert_sew_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/convert_sew_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd4e9bc00dc163587ad9b4838fd334d446cdfacc
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/convert_sew_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/modeling_sew.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/modeling_sew.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1340243092967e5317ec1ab528872cab8e306861
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/sew/__pycache__/modeling_sew.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sew/configuration_sew.py b/llava_next/lib/python3.10/site-packages/transformers/models/sew/configuration_sew.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5db6fd2c1044a435eb0252942af2bf675a1fc38
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/sew/configuration_sew.py
@@ -0,0 +1,258 @@
+# coding=utf-8
+# Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" SEW model configuration"""
+
+import functools
+import operator
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
+ # See all SEW models at https://huggingface.co/models?filter=sew
+}
+
+
+class SEWConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`SEWModel`]. It is used to instantiate a SEW model
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the SEW
+ [asapp/sew-tiny-100k](https://huggingface.co/asapp/sew-tiny-100k) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32):
+ Vocabulary size of the SEW model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`SEWModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ squeeze_factor (`int`, *optional*, defaults to 2):
+ Sequence length downsampling factor after the encoder and upsampling factor after the transformer.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for activations inside the fully connected layer.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ final_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the final projection layer of [`SEWForCTC`].
+ layerdrop (`float`, *optional*, defaults to 0.1):
+ The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
+ details.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ feat_extract_norm (`str`, *optional*, defaults to `"group"`):
+ The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
+ normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
+ convolutional layers.
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for output of the feature encoder.
+ feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)`):
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
+ feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
+ conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)`):
+ A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
+ of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
+ conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
+ length of *conv_kernel* defines the number of convolutional layers and has to match the length of
+ *conv_dim*.
+ conv_bias (`bool`, *optional*, defaults to `False`):
+ Whether the 1D convolutional layers have a bias.
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
+ embeddings layer.
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
+ Number of groups of 1D convolutional positional embeddings layer.
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
+ Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
+ [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
+ Recognition](https://arxiv.org/abs/1904.08779).
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
+ Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
+ procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
+ reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
+ masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
+ actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
+ mask_time_length (`int`, *optional*, defaults to 10):
+ Length of vector span along the time axis.
+ mask_time_min_masks (`int`, *optional*, defaults to 2):
+ The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
+ irrespective of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
+ mask_time_min_masks`.
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
+ masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over
+ the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
+ True`.
+ mask_feature_length (`int`, *optional*, defaults to 10):
+ Length of vector span along the feature axis.
+ mask_feature_min_masks (`int`, *optional*, defaults to 0):
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
+ step, irrespective of `mask_feature_prob`. Only relevant if
+ `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
+ instance of [`SEWForCTC`].
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
+ of [`SEWForCTC`].
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
+ instance of [`Wav2Vec2ForSequenceClassification`].
+ classifier_proj_size (`int`, *optional*, defaults to 256):
+ Dimensionality of the projection before token mean-pooling for classification.
+
+ Example:
+
+ ```python
+ >>> from transformers import SEWConfig, SEWModel
+
+ >>> # Initializing a SEW asapp/sew-tiny-100k style configuration
+ >>> configuration = SEWConfig()
+
+ >>> # Initializing a model (with random weights) from the asapp/sew-tiny-100k style configuration
+ >>> model = SEWModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "sew"
+
+ def __init__(
+ self,
+ vocab_size=32,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ squeeze_factor=2,
+ hidden_act="gelu",
+ hidden_dropout=0.1,
+ activation_dropout=0.1,
+ attention_dropout=0.1,
+ feat_proj_dropout=0.0,
+ final_dropout=0.1,
+ layerdrop=0.1,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ feat_extract_norm="group",
+ feat_extract_activation="gelu",
+ conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
+ conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
+ conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
+ conv_bias=False,
+ num_conv_pos_embeddings=128,
+ num_conv_pos_embedding_groups=16,
+ apply_spec_augment=True,
+ mask_time_prob=0.05,
+ mask_time_length=10,
+ mask_time_min_masks=2,
+ mask_feature_prob=0.0,
+ mask_feature_length=10,
+ mask_feature_min_masks=0,
+ ctc_loss_reduction="mean",
+ ctc_zero_infinity=False,
+ use_weighted_layer_sum=False,
+ classifier_proj_size=256,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
+ self.hidden_size = hidden_size
+ self.feat_extract_norm = feat_extract_norm
+ self.feat_extract_activation = feat_extract_activation
+ self.conv_dim = list(conv_dim)
+ self.conv_stride = list(conv_stride)
+ self.conv_kernel = list(conv_kernel)
+ self.conv_bias = conv_bias
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
+ self.num_feat_extract_layers = len(self.conv_dim)
+ self.num_hidden_layers = num_hidden_layers
+ self.intermediate_size = intermediate_size
+ self.squeeze_factor = squeeze_factor
+ self.hidden_act = hidden_act
+ self.num_attention_heads = num_attention_heads
+ self.hidden_dropout = hidden_dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.feat_proj_dropout = feat_proj_dropout
+ self.final_dropout = final_dropout
+ self.layerdrop = layerdrop
+ self.layer_norm_eps = layer_norm_eps
+ self.initializer_range = initializer_range
+ self.vocab_size = vocab_size
+
+ if (
+ (len(self.conv_stride) != self.num_feat_extract_layers)
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
+ ):
+ raise ValueError(
+ "Configuration for convolutional layers is incorrect. "
+ "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
+ f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
+ f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
+ )
+
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
+ self.apply_spec_augment = apply_spec_augment
+ self.mask_time_prob = mask_time_prob
+ self.mask_time_length = mask_time_length
+ self.mask_time_min_masks = mask_time_min_masks
+ self.mask_feature_prob = mask_feature_prob
+ self.mask_feature_length = mask_feature_length
+ self.mask_feature_min_masks = mask_feature_min_masks
+
+ # ctc loss
+ self.ctc_loss_reduction = ctc_loss_reduction
+ self.ctc_zero_infinity = ctc_zero_infinity
+
+ # sequence classification
+ self.use_weighted_layer_sum = use_weighted_layer_sum
+ self.classifier_proj_size = classifier_proj_size
+
+ @property
+ def inputs_to_logits_ratio(self):
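+ # the cumulative stride of the feature encoder: for the default conv_stride
+ # (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this is 5 * 2**6 = 320, i.e. one
+ # encoder frame per 320 input samples (20 ms of 16 kHz audio)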
+ return functools.reduce(operator.mul, self.conv_stride, 1)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py b/llava_next/lib/python3.10/site-packages/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..81c3284af8ef6e87a61b3776d56900c8b102bcca
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/sew/convert_sew_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,306 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert SEW checkpoint."""
+
+
+import argparse
+import json
+import os
+
+import fairseq
+import torch
+from fairseq.data import Dictionary
+
+# Register SEW's fairseq modules
+from sew_asapp import tasks # noqa: F401
+
+from transformers import (
+ SEWConfig,
+ SEWForCTC,
+ SEWModel,
+ Wav2Vec2CTCTokenizer,
+ Wav2Vec2FeatureExtractor,
+ Wav2Vec2Processor,
+ logging,
+)
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+MAPPING = {
+ "post_extract_proj": "feature_projection",
+ "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
+ "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
+ "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
+ "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
+ "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
+ "self_attn_layer_norm": "encoder.layers.*.layer_norm",
+ "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
+ "fc2": "encoder.layers.*.feed_forward.output_dense",
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
+ "encoder.upsample.0": "encoder.upsample.projection",
+ "encoder.layer_norm": "encoder.layer_norm",
+ "w2v_model.layer_norm": "layer_norm",
+ "w2v_encoder.proj": "lm_head",
+ "mask_emb": "masked_spec_embed",
+}
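+# For illustration (hypothetical key): a fairseq parameter such as
+# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj" entry above;
+# recursively_load_weights below replaces the "*" with the parsed layer index and copies
+# the value to "encoder.layers.3.attention.k_proj.weight" (prefixed with "sew." for
+# fine-tuned checkpoints).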
+
+
+def set_recursively(hf_pointer, key, value, full_name, weight_type):
+ for attribute in key.split("."):
+ hf_pointer = getattr(hf_pointer, attribute)
+
+ if weight_type is not None:
+ hf_shape = getattr(hf_pointer, weight_type).shape
+ else:
+ hf_shape = hf_pointer.shape
+
+ assert hf_shape == value.shape, (
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
+ f" {value.shape} for {full_name}"
+ )
+
+ if weight_type == "weight":
+ hf_pointer.weight.data = value
+ elif weight_type == "weight_g":
+ hf_pointer.weight_g.data = value
+ elif weight_type == "weight_v":
+ hf_pointer.weight_v.data = value
+ elif weight_type == "bias":
+ hf_pointer.bias.data = value
+ else:
+ hf_pointer.data = value
+
+ logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
+
+
+def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
+ unused_weights = []
+ fairseq_dict = fairseq_model.state_dict()
+
+ feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
+
+ for name, value in fairseq_dict.items():
+ is_used = False
+ if "conv_layers" in name:
+ load_conv_layer(
+ name,
+ value,
+ feature_extractor,
+ unused_weights,
+ hf_model.config.feat_extract_norm == "group",
+ )
+ is_used = True
+ else:
+ for key, mapped_key in MAPPING.items():
+ mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
+
+ if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
+ is_used = True
+ if "*" in mapped_key:
+ layer_index = name.split(key)[0].split(".")[-2]
+ mapped_key = mapped_key.replace("*", layer_index)
+ if "weight_g" in name:
+ weight_type = "weight_g"
+ elif "weight_v" in name:
+ weight_type = "weight_v"
+ elif "weight" in name:
+ weight_type = "weight"
+ elif "bias" in name:
+ weight_type = "bias"
+ else:
+ weight_type = None
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
+ continue
+ if not is_used:
+ unused_weights.append(name)
+
+ logger.warning(f"Unused weights: {unused_weights}")
+
+
+def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
+ name = full_name.split("conv_layers.")[-1]
+ items = name.split(".")
+ layer_id = int(items[0])
+ type_id = int(items[1])
+
+ if type_id == 0:
+ if "bias" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.bias.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.weight.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
+ if "bias" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
+ f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
+ " found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ else:
+ unused_weights.append(full_name)
+
+
+def convert_config(model, is_finetuned):
+ config = SEWConfig()
+ if is_finetuned:
+ fs_config = model.w2v_encoder.w2v_model.cfg
+ else:
+ fs_config = model.cfg
+
+ config.conv_bias = fs_config.conv_bias
+ conv_layers = eval(fs_config.conv_feature_layers)
+ config.conv_dim = [x[0] for x in conv_layers]
+ config.conv_kernel = [x[1] for x in conv_layers]
+ config.conv_stride = [x[2] for x in conv_layers]
+ config.feat_extract_activation = "gelu"
+ config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
+ config.final_dropout = 0.0
+ config.hidden_act = fs_config.activation_fn.name
+ config.hidden_size = fs_config.encoder_embed_dim
+ config.initializer_range = 0.02
+ config.intermediate_size = fs_config.encoder_ffn_embed_dim
+ config.layer_norm_eps = 1e-5
+ config.layerdrop = fs_config.encoder_layerdrop
+ config.num_attention_heads = fs_config.encoder_attention_heads
+ config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
+ config.num_conv_pos_embeddings = fs_config.conv_pos
+ config.num_feat_extract_layers = len(conv_layers)
+ config.num_hidden_layers = fs_config.encoder_layers
+ config.squeeze_factor = fs_config.squeeze_factor
+
+ # take care of any params that are overridden by the Wav2VecCtc model
+ if is_finetuned:
+ fs_config = model.cfg
+ config.final_dropout = fs_config.final_dropout
+ config.layerdrop = fs_config.layerdrop
+ config.activation_dropout = fs_config.activation_dropout
+ config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
+ config.attention_dropout = fs_config.attention_dropout
+ config.feat_proj_dropout = fs_config.dropout_input
+ config.hidden_dropout = fs_config.dropout
+ config.mask_feature_length = fs_config.mask_channel_length
+ config.mask_feature_prob = fs_config.mask_channel_prob
+ config.mask_time_length = fs_config.mask_length
+ config.mask_time_prob = fs_config.mask_prob
+
+ config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
+ config.tokenizer_class = "Wav2Vec2CTCTokenizer"
+
+ return config
+
+
+@torch.no_grad()
+def convert_sew_checkpoint(
+ checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
+):
+ """
+ Copy/paste/tweak model's weights to transformers design.
+ """
+
+ if is_finetuned:
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
+ [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
+ )
+ else:
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
+
+ if config_path is not None:
+ config = SEWConfig.from_pretrained(config_path)
+ else:
+ config = convert_config(model[0], is_finetuned)
+ model = model[0].eval()
+
+ return_attention_mask = True if config.feat_extract_norm == "layer" else False
+ feature_extractor = Wav2Vec2FeatureExtractor(
+ feature_size=1,
+ sampling_rate=16000,
+ padding_value=0,
+ do_normalize=True,
+ return_attention_mask=return_attention_mask,
+ )
+
+ if is_finetuned:
+ if dict_path:
+ target_dict = Dictionary.load(dict_path)
+
+ # important change bos & pad token id since CTC symbol is and
+ # not as in fairseq
+ target_dict.indices[target_dict.bos_word] = target_dict.pad_index
+ target_dict.indices[target_dict.pad_word] = target_dict.bos_index
+ config.bos_token_id = target_dict.pad_index
+ config.pad_token_id = target_dict.bos_index
+ config.eos_token_id = target_dict.eos_index
+ config.vocab_size = len(target_dict.symbols)
+ vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
+ if not os.path.isdir(pytorch_dump_folder_path):
+ logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
+ return
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+ with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
+ json.dump(target_dict.indices, vocab_handle)
+ tokenizer = Wav2Vec2CTCTokenizer(
+ vocab_path,
+ unk_token=target_dict.unk_word,
+ pad_token=target_dict.pad_word,
+ bos_token=target_dict.bos_word,
+ eos_token=target_dict.eos_word,
+ word_delimiter_token="|",
+ do_lower_case=False,
+ )
+ processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ hf_model = SEWForCTC(config)
+ else:
+ hf_model = SEWModel(config)
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
+
+ recursively_load_weights(model, hf_model, is_finetuned)
+
+ hf_model.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
+ parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+ parser.add_argument(
+ "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
+ )
+ args = parser.parse_args()
+ convert_sew_checkpoint(
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
+ )
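+
+# Example invocation (paths are placeholders):
+#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
+#       --checkpoint_path sew-tiny-100k.pt --pytorch_dump_folder_path ./sew-tiny-100k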
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/sew/modeling_sew.py b/llava_next/lib/python3.10/site-packages/transformers/models/sew/modeling_sew.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5ebb9b2bb424540056d0ff7f8cb7f2049317636
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/sew/modeling_sew.py
@@ -0,0 +1,1230 @@
+# coding=utf-8
+# Copyright 2021 ASAPP Inc. and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch SEW model."""
+
+import math
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_sew import SEWConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+_HIDDEN_STATES_START_POSITION = 1
+
+# General docstring
+_CONFIG_FOR_DOC = "SEWConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "asapp/sew-tiny-100k-ft-ls100h"
+_EXPECTED_OUTPUT_SHAPE = [1, 292, 512]
+
+# CTC docstring
+_CTC_EXPECTED_OUTPUT = (
+ "'MISTER QUILTER IS THE APPOSTILE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPOLLE'"
+)
+_CTC_EXPECTED_LOSS = 0.42
+
+# Audio class docstring
+_SEQ_CLASS_CHECKPOINT = "anton-l/sew-mid-100k-ft-keyword-spotting"
+_SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'"
+_SEQ_CLASS_EXPECTED_LOSS = 9.52
+
+SEW_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "asapp/sew-tiny-100k",
+ "asapp/sew-small-100k",
+ "asapp/sew-mid-100k",
+ # See all SEW models at https://huggingface.co/models?filter=sew
+]
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
+def _compute_mask_indices(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ attention_mask: Optional[torch.LongTensor] = None,
+ min_masks: int = 0,
+) -> np.ndarray:
+ """
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
+ CPU as part of the preprocessing during training.
+
+ Args:
+ shape: The shape for which to compute masks. This should be of a tuple of size 2 where
+ the first element is the batch size and the second element is the length of the axis to span.
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
+ independently generated mask spans of length `mask_length` is computed by
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
+ actual percentage will be smaller.
+ mask_length: size of the mask
+ min_masks: minimum number of masked spans
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
+ each batch dimension.
+ """
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
+ f" and `sequence_length`: {sequence_length}`"
+ )
+
+ # epsilon is used for probabilistic rounding
+ epsilon = np.random.rand(1).item()
+
+ def compute_num_masked_span(input_length):
+ """Given input length, compute how many spans should be masked"""
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
+ num_masked_span = max(num_masked_span, min_masks)
+
+ # make sure num masked span <= sequence_length
+ if num_masked_span * mask_length > sequence_length:
+ num_masked_span = sequence_length // mask_length
+
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
+ if input_length - (mask_length - 1) < num_masked_span:
+ num_masked_span = max(input_length - (mask_length - 1), 0)
+
+ return num_masked_span
+
+ # compute number of masked spans in batch
+ input_lengths = (
+ attention_mask.sum(-1).detach().tolist()
+ if attention_mask is not None
+ else [sequence_length for _ in range(batch_size)]
+ )
+
+ # SpecAugment mask to fill
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
+ spec_aug_mask_idxs = []
+
+ max_num_masked_span = compute_num_masked_span(sequence_length)
+
+ if max_num_masked_span == 0:
+ return spec_aug_mask
+
+ for input_length in input_lengths:
+ # compute num of masked spans for this input
+ num_masked_span = compute_num_masked_span(input_length)
+
+ # get random indices to mask
+ spec_aug_mask_idx = np.random.choice(
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
+ )
+
+ # pick first sampled index that will serve as a dummy index to pad vector
+ # to ensure same dimension for all batches due to probabilistic rounding
+ # Picking first sample just pads those vectors twice.
+ if len(spec_aug_mask_idx) == 0:
+ # this case can only happen if `input_length` is strictly smaller than
+ # `sequence_length` in which case the last token has to be a padding
+ # token which we can use as a dummy mask id
+ dummy_mask_idx = sequence_length - 1
+ else:
+ dummy_mask_idx = spec_aug_mask_idx[0]
+
+ spec_aug_mask_idx = np.concatenate(
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
+ )
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
+
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
+
+ # expand masked indices to masked spans
+ spec_aug_mask_idxs = np.broadcast_to(
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
+
+ # add offset to the starting indexes so that indexes now create a span
+ offsets = np.arange(mask_length)[None, None, :]
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
+ batch_size, max_num_masked_span * mask_length
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
+
+ # ensure that we cannot have indices larger than sequence_length
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
+
+ # scatter indices to mask
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
+
+ return spec_aug_mask
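+
+# Illustrative usage (assumed values, not from the original file): for a batch of 2
+# sequences of 100 frames with mask_prob=0.05 and mask_length=10,
+#   mask = _compute_mask_indices((2, 100), mask_prob=0.05, mask_length=10)
+# returns a boolean (2, 100) array whose masked positions, if any, form spans of 10
+# consecutive time steps; overlapping spans can lower the effective masked fraction.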
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->SEW
+class SEWNoLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->SEW
+class SEWLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+
+ hidden_states = hidden_states.transpose(-2, -1)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states.transpose(-2, -1)
+
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->SEW
+class SEWGroupNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class SEWPositionalConvEmbedding(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ padding=config.num_conv_pos_embeddings // 2,
+ groups=config.num_conv_pos_embedding_groups,
+ stride=config.squeeze_factor,
+ )
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
+ else:
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+
+ self.padding = SEWSamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->SEW
+class SEWSamePadLayer(nn.Module):
+ def __init__(self, num_conv_pos_embeddings):
+ super().__init__()
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
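+ # an even positional-conv kernel with padding `kernel_size // 2` yields one frame
+ # more than the input, so `forward` trims the last frame; odd kernels already
+ # preserve the sequence length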
+
+ def forward(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
+ return hidden_states
+
+
+class SEWUpsampling(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.projection = nn.Linear(config.hidden_size, config.hidden_size * config.squeeze_factor)
+ self.activation = ACT2FN[config.feat_extract_activation]
+ self.squeeze_factor = config.squeeze_factor
+
+ def forward(self, hidden_states):
+ hidden_states = self.projection(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ if self.squeeze_factor > 1:
+ # transform embedding channels to sequence length
+ bsz, src_len, src_embed_dim = hidden_states.size()
+ tgt_len = src_len * self.squeeze_factor
+ tgt_embed_dim = src_embed_dim // self.squeeze_factor
+ hidden_states = hidden_states.reshape(bsz, src_len, self.squeeze_factor, tgt_embed_dim)
+ hidden_states = hidden_states.reshape(bsz, tgt_len, tgt_embed_dim)
+
+ return hidden_states
+
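+# A minimal shape sketch of the squeeze-to-time trick above, with hypothetical sizes
+# and a squeeze_factor of 2 (the projection has already widened the channel dim):
+#
+#     >>> import torch
+#     >>> hidden = torch.randn(2, 4, 8)  # (bsz, src_len, src_embed_dim)
+#     >>> tuple(hidden.reshape(2, 4, 2, 4).reshape(2, 8, 4).shape)
+#     (2, 8, 4)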
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->SEW
+class SEWFeatureEncoder(nn.Module):
+ """Construct the features from raw audio waveform"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ if config.feat_extract_norm == "group":
+ conv_layers = [SEWGroupNormConvLayer(config, layer_id=0)] + [
+ SEWNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
+ ]
+ elif config.feat_extract_norm == "layer":
+ conv_layers = [SEWLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
+ else:
+ raise ValueError(
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
+ )
+ self.conv_layers = nn.ModuleList(conv_layers)
+ self.gradient_checkpointing = False
+ self._requires_grad = True
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.requires_grad = False
+ self._requires_grad = False
+
+ def forward(self, input_values):
+ hidden_states = input_values[:, None]
+
+ # make sure hidden_states require grad for gradient_checkpointing
+ if self._requires_grad and self.training:
+ hidden_states.requires_grad = True
+
+ for conv_layer in self.conv_layers:
+ if self._requires_grad and self.gradient_checkpointing and self.training:
+ hidden_states = self._gradient_checkpointing_func(
+ conv_layer.__call__,
+ hidden_states,
+ )
+ else:
+ hidden_states = conv_layer(hidden_states)
+
+ return hidden_states
+
+
+class SEWFeatureExtractor(SEWFeatureEncoder):
+ def __init__(self, config):
+ super().__init__(config)
+ warnings.warn(
+ f"The class `{self.__class__.__name__}` has been depreciated "
+ "and will be removed in Transformers v5. "
+ f"Use `{self.__class__.__bases__[0].__name__}` instead.",
+ FutureWarning,
+ )
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->SEW
+class SEWAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[SEWConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
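+# Shape sketch for the head split in `SEWAttention._shape`, using hypothetical sizes
+# (bsz=2, seq_len=5, num_heads=4, head_dim=8, so embed_dim=32):
+#
+#     >>> import torch
+#     >>> t = torch.randn(2, 5, 32)
+#     >>> tuple(t.view(2, 5, 4, 8).transpose(1, 2).shape)
+#     (2, 4, 5, 8)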
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->SEW
+class SEWFeedForward(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
+
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->SEW
+class SEWEncoderLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = SEWAttention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False,
+ )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.feed_forward = SEWFeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ attn_residual = hidden_states
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+ )
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = attn_residual + hidden_states
+
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class SEWEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.pos_conv_embed = SEWPositionalConvEmbedding(config)
+ self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layers = nn.ModuleList([SEWEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.upsample = SEWUpsampling(config)
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ # make sure padded tokens output 0
+ hidden_states[~attention_mask] = 0.0
+
+ input_lengths = (attention_mask.long()).sum(-1)
+ # apply pooling formula to get real output_lengths
+ output_lengths = input_lengths // self.config.squeeze_factor
+ max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor
+ attention_ids = (
+ torch.arange(0, max_encoder_length, device=output_lengths.device)
+ .view(1, -1)
+ .expand(output_lengths.shape[0], -1)
+ )
+ attention_mask = (attention_ids < output_lengths.view(-1, 1)).long()
+
+ # extend attention_mask
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
+ attention_mask = attention_mask.expand(
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
+ )
+
+ n_input_timesteps = hidden_states.shape[1]
+
+ hidden_states = hidden_states.transpose(1, 2)
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ pooled_hidden_states = self.pool(hidden_states)
+ min_length = min(position_embeddings.size(-1), pooled_hidden_states.size(-1))
+ hidden_states = pooled_hidden_states[..., :min_length] + position_embeddings[..., :min_length]
+ hidden_states = hidden_states.transpose(1, 2)
+
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+            skip_the_layer = bool(self.training and (dropout_probability < self.config.layerdrop))
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer.__call__,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer(
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+ )
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ hidden_states = self.upsample(hidden_states)
+ if hidden_states.shape[1] < n_input_timesteps:
+ hidden_states = nn.functional.pad(hidden_states, (0, 0, 0, n_input_timesteps - hidden_states.shape[1]))
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
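+# Sketch of the additive mask built in `SEWEncoder.forward` (hypothetical values): a
+# binary (bsz, seq_len) mask becomes a (bsz, 1, seq_len, seq_len) float mask whose
+# padded columns hold the dtype minimum, so softmax gives them ~zero weight:
+#
+#     >>> import torch
+#     >>> mask = torch.tensor([[1, 1, 0]])
+#     >>> ext = (1.0 - mask[:, None, None, :].float()) * torch.finfo(torch.float32).min
+#     >>> tuple(ext.expand(1, 1, 3, 3).shape)
+#     (1, 1, 3, 3)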
+
+class SEWPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SEWConfig
+ base_model_prefix = "sew"
+ main_input_name = "input_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, SEWPositionalConvEmbedding):
+ nn.init.normal_(
+ module.conv.weight,
+ mean=0,
+ std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
+ )
+ nn.init.constant_(module.conv.bias, 0)
+ elif isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Conv1d):
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
+ with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
+ nn.init.kaiming_normal_(module.weight.data)
+ else:
+ with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
+ nn.init.kaiming_normal_(module.weight.data)
+ else:
+ nn.init.kaiming_normal_(module.weight.data)
+
+ if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
+ module.bias.data.zero_()
+
+ def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
+ """
+ Computes the output length of the convolutional layers
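+
+        For a single convolutional layer, `out = (in - kernel_size) // stride + 1`. As a
+        worked example with hypothetical settings `conv_kernel=(10, 3)` and
+        `conv_stride=(5, 2)`, an input of length 16000 gives (16000 - 10) // 5 + 1 = 3199
+        frames after the first layer and (3199 - 3) // 2 + 1 = 1599 after the second.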
+ """
+
+ def _conv_out_length(input_length, kernel_size, stride):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
+
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
+
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
+ output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+ batch_size = attention_mask.shape[0]
+
+ attention_mask = torch.zeros(
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+        # these two operations make sure that all values before the output length indices are attended to
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
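+        # e.g. with feature_vector_length 5 and an output length of 2 (hypothetical values),
+        # the scatter above yields [0, 1, 0, 0, 0]; flip -> [0, 0, 0, 1, 0],
+        # cumsum -> [0, 0, 0, 1, 1], flip -> [1, 1, 0, 0, 0], so exactly the first two
+        # positions are attended to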
+ return attention_mask
+
+
+SEW_START_DOCSTRING = r"""
+ SEW was proposed in [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech
+ Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger,
+ Yoav Artzi.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, etc.).
+
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`SEWConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+SEW_INPUTS_DOCSTRING = r"""
+ Args:
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
+ soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
+ conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
+ 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
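+# A minimal usage sketch for the inputs described above; the checkpoint name is
+# illustrative, and `raw_audio` is assumed to hold a 16 kHz waveform array:
+#
+#     >>> from transformers import AutoProcessor, SEWModel
+#     >>> processor = AutoProcessor.from_pretrained("asapp/sew-tiny-100k")
+#     >>> model = SEWModel.from_pretrained("asapp/sew-tiny-100k")
+#     >>> inputs = processor(raw_audio, sampling_rate=16000, return_tensors="pt")
+#     >>> last_hidden = model(**inputs).last_hidden_state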
+
+@add_start_docstrings(
+ "The bare SEW Model transformer outputting raw hidden-states without any specific head on top.",
+ SEW_START_DOCSTRING,
+)
+class SEWModel(SEWPreTrainedModel):
+ def __init__(self, config: SEWConfig):
+ super().__init__(config)
+ self.config = config
+ self.feature_extractor = SEWFeatureEncoder(config)
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
+
+ self.project_features = config.conv_dim[-1] != config.hidden_size
+ if self.project_features:
+ self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
+ self.feature_dropout = nn.Dropout(config.feat_proj_dropout)
+
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
+
+ self.encoder = SEWEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
+ def _mask_hidden_states(
+ self,
+ hidden_states: torch.FloatTensor,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ):
+ """
+ Masks extracted features along time axis and/or along feature axis according to
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
+ """
+
+ # `config.apply_spec_augment` can set masking to False
+ if not getattr(self.config, "apply_spec_augment", True):
+ return hidden_states
+
+ # generate indices & apply SpecAugment along time axis
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+
+ if mask_time_indices is not None:
+ # apply SpecAugment along time axis with given mask_time_indices
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+ elif self.config.mask_time_prob > 0 and self.training:
+ mask_time_indices = _compute_mask_indices(
+ (batch_size, sequence_length),
+ mask_prob=self.config.mask_time_prob,
+ mask_length=self.config.mask_time_length,
+ attention_mask=attention_mask,
+ min_masks=self.config.mask_time_min_masks,
+ )
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+
+ if self.config.mask_feature_prob > 0 and self.training:
+ # generate indices & apply SpecAugment along feature axis
+ mask_feature_indices = _compute_mask_indices(
+ (batch_size, hidden_size),
+ mask_prob=self.config.mask_feature_prob,
+ mask_length=self.config.mask_feature_length,
+ min_masks=self.config.mask_feature_min_masks,
+ )
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
+ hidden_states[mask_feature_indices] = 0
+
+ return hidden_states
+
+ @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ extract_features = self.feature_extractor(input_values)
+ extract_features = extract_features.transpose(1, 2)
+ extract_features = self.layer_norm(extract_features)
+
+ if self.project_features:
+ extract_features = self.feature_projection(extract_features)
+ hidden_states = self.feature_dropout(extract_features)
+
+ if attention_mask is not None:
+ # compute reduced attention_mask corresponding to feature vectors
+ attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
+
+ hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if not return_dict:
+ return (hidden_states,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """SEW Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
+ SEW_START_DOCSTRING,
+)
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->SEW, wav2vec2->sew, WAV_2_VEC_2->SEW
+class SEWForCTC(SEWPreTrainedModel):
+ def __init__(self, config, target_lang: Optional[str] = None):
+ super().__init__(config)
+
+ self.sew = SEWModel(config)
+ self.dropout = nn.Dropout(config.final_dropout)
+
+ self.target_lang = target_lang
+
+ if config.vocab_size is None:
+ raise ValueError(
+ f"You are trying to instantiate {self.__class__} with a configuration that "
+ "does not define the vocabulary size of the language model head. Please "
+ "instantiate the model as follows: `SEWForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
+ "or define `vocab_size` of your model's configuration."
+ )
+ output_hidden_size = (
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
+ )
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def tie_weights(self):
+ """
+ This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
+ passing `target_lang=...` to `from_pretrained(...)`.
+
+ This method is **not** supposed to be called by the user and is prone to be changed in the future.
+ """
+
+ # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
+ # correctly load adapter layers for SEW so that we do not have to introduce a new API to
+        # [`PreTrainedModel`]. While slightly hacky, SEW never has to tie input and output embeddings, so it is
+        # ok to repurpose this function here.
+ target_lang = self.target_lang
+
+ if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
+ raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
+ elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
+ logger.info("By default `target_lang` is set to 'eng'.")
+ elif target_lang is not None:
+ self.load_adapter(target_lang, force_load=True)
+
+ def freeze_feature_extractor(self):
+ """
+        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ warnings.warn(
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
+ FutureWarning,
+ )
+ self.freeze_feature_encoder()
+
+ def freeze_feature_encoder(self):
+ """
+        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.sew.feature_extractor._freeze_parameters()
+
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.sew.parameters():
+ param.requires_grad = False
+
+ @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_CTC_EXPECTED_OUTPUT,
+ expected_loss=_CTC_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, CausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
+            Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or equal to
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
+ config.vocab_size - 1]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.sew(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states)
+
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ if labels.max() >= self.config.vocab_size:
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
+
+ # retrieve loss input_lengths from attention_mask
+ attention_mask = (
+ attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
+ )
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+
+ # assuming that padded tokens are filled with -100
+ # when not being attended to
+ labels_mask = labels >= 0
+ target_lengths = labels_mask.sum(-1)
+ flattened_targets = labels.masked_select(labels_mask)
+
+ # ctc_loss doesn't support fp16
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
+
+ with torch.backends.cudnn.flags(enabled=False):
+ loss = nn.functional.ctc_loss(
+ log_probs,
+ flattened_targets,
+ input_lengths,
+ target_lengths,
+ blank=self.config.pad_token_id,
+ reduction=self.config.ctc_loss_reduction,
+ zero_infinity=self.config.ctc_zero_infinity,
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
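+# Greedy CTC decoding sketch for the head above; the checkpoint name is illustrative,
+# and `raw_audio` is assumed to hold a 16 kHz waveform array:
+#
+#     >>> from transformers import AutoProcessor, SEWForCTC
+#     >>> processor = AutoProcessor.from_pretrained("asapp/sew-tiny-100k-ft-ls100h")
+#     >>> model = SEWForCTC.from_pretrained("asapp/sew-tiny-100k-ft-ls100h")
+#     >>> inputs = processor(raw_audio, sampling_rate=16000, return_tensors="pt")
+#     >>> logits = model(**inputs).logits
+#     >>> transcription = processor.batch_decode(logits.argmax(dim=-1))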
+
+@add_start_docstrings(
+ """
+ SEW Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB
+ Keyword Spotting.
+ """,
+ SEW_START_DOCSTRING,
+)
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->SEW, wav2vec2->sew, WAV_2_VEC_2->SEW
+class SEWForSequenceClassification(SEWPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ if hasattr(config, "add_adapter") and config.add_adapter:
+ raise ValueError(
+ "Sequence classification does not support the use of SEW adapters (config.add_adapter=True)"
+ )
+ self.sew = SEWModel(config)
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
+ if config.use_weighted_layer_sum:
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def freeze_feature_extractor(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ warnings.warn(
+ "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
+ "Please use the equivalent `freeze_feature_encoder` method instead.",
+ FutureWarning,
+ )
+ self.freeze_feature_encoder()
+
+ def freeze_feature_encoder(self):
+ """
+        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.sew.feature_extractor._freeze_parameters()
+
+ def freeze_base_model(self):
+ """
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
+ be updated during training. Only the classification head will be updated.
+ """
+ for param in self.sew.parameters():
+ param.requires_grad = False
+
+ @add_start_docstrings_to_model_forward(SEW_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_SEQ_CLASS_CHECKPOINT,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
+
+ outputs = self.sew(
+ input_values,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if self.config.use_weighted_layer_sum:
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
+ hidden_states = torch.stack(hidden_states, dim=1)
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
+ else:
+ hidden_states = outputs[0]
+
+ hidden_states = self.projector(hidden_states)
+ if attention_mask is None:
+ pooled_output = hidden_states.mean(dim=1)
+ else:
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
+ hidden_states[~padding_mask] = 0.0
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
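+
+
+# Sketch of the weighted layer sum used in `SEWForSequenceClassification.forward`,
+# with hypothetical sizes (3 layers, batch 2, 4 frames, hidden size 8):
+#
+#     >>> import torch
+#     >>> hidden_states = torch.stack([torch.randn(2, 4, 8) for _ in range(3)], dim=1)
+#     >>> weights = torch.softmax(torch.ones(3), dim=-1)
+#     >>> tuple((hidden_states * weights.view(-1, 1, 1)).sum(dim=1).shape)
+#     (2, 4, 8)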
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py b/llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..378f082e4b9c17727ead6baf1cae7a09d7f5a853
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py
@@ -0,0 +1,108 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto.configuration_auto import AutoConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+class SpeechEncoderDecoderConfig(PretrainedConfig):
+ r"""
+ [`SpeechEncoderDecoderConfig`] is the configuration class to store the configuration of a
+ [`SpeechEncoderDecoderModel`]. It is used to instantiate an Encoder Decoder model according to the specified
+ arguments, defining the encoder and decoder configs.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ kwargs (*optional*):
+ Dictionary of keyword arguments. Notably:
+
+ - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
+ the encoder config.
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
+ the decoder config.
+
+ Examples:
+
+ ```python
+ >>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel
+
+ >>> # Initializing a Wav2Vec2 & BERT style configuration
+ >>> config_encoder = Wav2Vec2Config()
+ >>> config_decoder = BertConfig()
+
+ >>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
+
+    >>> # Initializing a Wav2Vec2Bert model from Wav2Vec2 & bert-base-uncased style configurations
+ >>> model = SpeechEncoderDecoderModel(config=config)
+
+ >>> # Accessing the model configuration
+ >>> config_encoder = model.config.encoder
+ >>> config_decoder = model.config.decoder
+ >>> # set decoder config to causal lm
+ >>> config_decoder.is_decoder = True
+ >>> config_decoder.add_cross_attention = True
+
+ >>> # Saving the model, including its configuration
+ >>> model.save_pretrained("my-model")
+
+ >>> # loading model and config from pretrained folder
+ >>> encoder_decoder_config = SpeechEncoderDecoderConfig.from_pretrained("my-model")
+ >>> model = SpeechEncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
+ ```"""
+
+ model_type = "speech-encoder-decoder"
+ is_composition = True
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ if "encoder" not in kwargs or "decoder" not in kwargs:
+ raise ValueError(
+ f"A configuraton of type {self.model_type} cannot be instantiated because not both `encoder` and"
+ f" `decoder` sub-configurations are passed, but only {kwargs}"
+ )
+
+ encoder_config = kwargs.pop("encoder")
+ encoder_model_type = encoder_config.pop("model_type")
+ decoder_config = kwargs.pop("decoder")
+ decoder_model_type = decoder_config.pop("model_type")
+
+ self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
+ self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
+ self.is_encoder_decoder = True
+
+ @classmethod
+ def from_encoder_decoder_configs(
+ cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
+ ) -> PretrainedConfig:
+ r"""
+ Instantiate a [`SpeechEncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model
+ configuration and decoder model configuration.
+
+ Returns:
+ [`SpeechEncoderDecoderConfig`]: An instance of a configuration object
+ """
+ logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
+ decoder_config.is_decoder = True
+ decoder_config.add_cross_attention = True
+
+ return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py b/llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..5028e30344ccb8a7fe60042e6034d8cacfe2d794
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
@@ -0,0 +1,604 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Classes to support Speech-Encoder-Text-Decoder architectures"""
+
+
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...configuration_utils import PretrainedConfig
+from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from ..auto.configuration_auto import AutoConfig
+from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
+from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "SpeechEncoderDecoderConfig"
+
+SPEECH_ENCODER_DECODER_START_DOCSTRING = r"""
+ This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech
+ autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is
+    loaded via the [`~AutoModel.from_pretrained`] function and the decoder is loaded via the
+    [`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder
+ and should be fine-tuned on a downstream generative task, like summarization.
+
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
+    Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
+
+ Additionally, in [Large-Scale Self- and Semi-Supervised Learning for Speech
+ Translation](https://arxiv.org/abs/2104.06678) it is shown how leveraging large pretrained speech models for speech
+ translation yields a significant performance improvement.
+
+    After such a Speech-Encoder-Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other
+    model (see the examples for more information).
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads, etc.).
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`SpeechEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING = r"""
+ Args:
+ inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*):
+ Float values of input raw speech waveform or speech features. Values can be obtained by loading a `.flac`
+ or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile
+ library (`pip install soundfile`). To prepare the array into `inputs`, either the [`Wav2Vec2Processor`] or
+ [`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type
+ `torch.FloatTensor`.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
+ right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
+ This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
+ of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
+ decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
+ into associated vectors than the model's internal embedding lookup matrix.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
+ ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
+ into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
+ soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding
+ and conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`, *optional*):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
+ by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
+ via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
+ [`Speech2TextFeatureExtractor`] should be used for extracting the fbank features, padding and conversion
+ into a tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`]
+ return_dict (`bool`, *optional*):
+ If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
+
+ - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
+ - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
+"""
+
+
+# Copied from transformers.models.encoder_decoder.modeling_encoder_decoder.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ if decoder_start_token_id is None:
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
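+# Worked example with hypothetical ids (pad_token_id=0, decoder_start_token_id=2); the
+# -100 at the end of the labels is dropped by the shift:
+#
+#     >>> import torch
+#     >>> shift_tokens_right(torch.tensor([[5, 6, -100]]), pad_token_id=0, decoder_start_token_id=2)
+#     tensor([[2, 5, 6]])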
+
+@add_start_docstrings(SPEECH_ENCODER_DECODER_START_DOCSTRING)
+class SpeechEncoderDecoderModel(PreTrainedModel):
+ r"""
+ [`SpeechEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with
+ one of the base model classes of the library as encoder and another one as decoder when created with the
+    [`~AutoModel.from_pretrained`] class method for the encoder and [`~AutoModelForCausalLM.from_pretrained`] class
+    method for the decoder.
+ """
+
+ config_class = SpeechEncoderDecoderConfig
+ base_model_prefix = "speech_encoder_decoder"
+ main_input_name = "inputs"
+ supports_gradient_checkpointing = True
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ encoder: Optional[PreTrainedModel] = None,
+ decoder: Optional[PreTrainedModel] = None,
+ ):
+ if config is None and (encoder is None or decoder is None):
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
+ if config is None:
+ config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
+ else:
+ if not isinstance(config, self.config_class):
+ raise ValueError(f"Config: {config} has to be of type {self.config_class}")
+
+ if config.decoder.cross_attention_hidden_size is not None:
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
+ raise ValueError(
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
+ " `config.encoder.hidden_size`."
+ )
+
+ # initialize with config
+        # make sure input & output embeddings are not tied
+ config.tie_word_embeddings = False
+ super().__init__(config)
+
+ if encoder is None:
+ encoder = AutoModel.from_config(config.encoder)
+
+ if decoder is None:
+ decoder = AutoModelForCausalLM.from_config(config.decoder)
+
+ self.encoder = encoder
+ self.decoder = decoder
+
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
+ logger.warning(
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
+ f" {self.config.encoder}"
+ )
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
+ logger.warning(
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
+ f" {self.config.decoder}"
+ )
+
+ # make sure that the individual model's config refers to the shared config
+ # so that the updates to the config will be synced
+ self.encoder.config = self.config.encoder
+ self.decoder.config = self.config.decoder
+
+ # get encoder output hidden size
+ self.encoder_output_dim = getattr(config.encoder, "output_hidden_size", config.encoder.hidden_size)
+ if (
+ self.encoder_output_dim != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ # encoder outputs might need to be projected to different dimension for decoder
+ self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
+
+ if self.encoder.get_output_embeddings() is not None:
+ raise ValueError(
+ f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
+ )
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def get_output_embeddings(self):
+ return self.decoder.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ return self.decoder.set_output_embeddings(new_embeddings)
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder of the speech encoder so
+ that its parameters will not be updated during training.
+ """
+ self.encoder.freeze_feature_encoder()
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ # At the moment fast initialization is not supported for composite models
+ if kwargs.get("_fast_init", False):
+ logger.warning(
+ "Fast initialization is currently not supported for SpeechEncoderDecoderModel. "
+ "Falling back to slow initialization..."
+ )
+ kwargs["_fast_init"] = False
+ return super().from_pretrained(*args, **kwargs)
+
+ @classmethod
+ def from_encoder_decoder_pretrained(
+ cls,
+        encoder_pretrained_model_name_or_path: Optional[str] = None,
+        decoder_pretrained_model_name_or_path: Optional[str] = None,
+ *model_args,
+ **kwargs,
+ ) -> PreTrainedModel:
+ r"""
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
+ checkpoints.
+
+
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
+ the model, you need to first set it back in training mode with `model.train()`.
+
+ Params:
+ encoder_pretrained_model_name_or_path (`str`, *optional*):
+                Information necessary to instantiate the encoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
+ user or organization name, like `dbmdz/bert-base-german-cased`.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+ Information necessary to initialize the decoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
+ user or organization name, like `dbmdz/bert-base-german-cased`.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+ model_args (remaining positional arguments, *optional*):
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
+
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import SpeechEncoderDecoderModel
+
+ >>> # initialize a wav2vec2bert from a pretrained Wav2Vec2 and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized
+ >>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
+ ... "facebook/wav2vec2-base-960h", "bert-base-uncased"
+ ... )
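+ >>> # (illustrative) kwargs prefixed with `encoder_`/`decoder_` update the corresponding
+ >>> # sub-model's configuration, e.g. raising the decoder's attention dropout:
+ >>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
+ ... "facebook/wav2vec2-base-960h", "bert-base-uncased", decoder_attention_probs_dropout_prob=0.2
+ ... )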
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./wav2vec2bert")
+ >>> # load fine-tuned model
+ >>> model = SpeechEncoderDecoderModel.from_pretrained("./wav2vec2bert")
+ ```"""
+
+ kwargs_encoder = {
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
+ }
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ # remove encoder, decoder kwargs from kwargs
+ for key in kwargs_encoder.keys():
+ del kwargs["encoder_" + key]
+ for key in kwargs_decoder.keys():
+ del kwargs["decoder_" + key]
+
+ # Load and initialize the encoder and decoder
+ # The distinction between encoder and decoder at the model level is made
+ # by the value of the flag `is_decoder` that we need to set correctly.
+ encoder = kwargs_encoder.pop("model", None)
+ if encoder is None:
+ if encoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_encoder:
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
+ )
+
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
+ logger.info(
+ f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
+ "from a decoder model. Cross-attention and casual mask are disabled."
+ )
+ encoder_config.is_decoder = False
+ encoder_config.add_cross_attention = False
+
+ kwargs_encoder["config"] = encoder_config
+
+ encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
+
+ decoder = kwargs_decoder.pop("model", None)
+ if decoder is None:
+ if decoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_decoder:
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
+ )
+
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
+ logger.info(
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
+ )
+ decoder_config.is_decoder = True
+ decoder_config.add_cross_attention = True
+
+ kwargs_decoder["config"] = decoder_config
+
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
+ logger.warning(
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
+ )
+
+ decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
+
+ # instantiate config with corresponding kwargs
+ config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
+
+ # make sure input & output embeddings are not tied
+ config.tie_word_embeddings = False
+ return cls(encoder=encoder, decoder=decoder, config=config)
+
+ @add_start_docstrings_to_model_forward(SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ input_values: Optional[torch.FloatTensor] = None,
+ input_features: Optional[torch.FloatTensor] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import SpeechEncoderDecoderModel, AutoProcessor
+ >>> from datasets import load_dataset
+ >>> import torch
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
+ >>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+
+ >>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values
+ >>> # Inference: Translate English speech to German
+ >>> generated = model.generate(input_values)
+ >>> decoded = processor.batch_decode(generated, skip_special_tokens=True)[0]
+ >>> decoded
+ 'Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.'
+
+ >>> # Training: Train model on English transcription
+ >>> labels = processor(text=ds[0]["text"], return_tensors="pt").input_ids
+
+ >>> loss = model(input_values, labels=labels).loss
+ >>> loss.backward()
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ if encoder_outputs is None:
+ if inputs is None:
+ if input_values is not None and input_features is not None:
+ raise ValueError("You cannot specify both input_values and input_features at the same time")
+ elif input_values is not None:
+ inputs = input_values
+ elif input_features is not None:
+ inputs = input_features
+ else:
+ raise ValueError("You have to specify either input_values or input_features")
+
+ encoder_outputs = self.encoder(
+ inputs,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs_encoder,
+ )
+ elif isinstance(encoder_outputs, tuple):
+ encoder_outputs = BaseModelOutput(*encoder_outputs)
+
+ encoder_hidden_states = encoder_outputs[0]
+
+ # optionally project encoder_hidden_states
+ if (
+ self.encoder_output_dim != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
+
+ # compute correct encoder attention mask
+ if attention_mask is not None:
+ encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
+ encoder_hidden_states.shape[1], attention_mask
+ )
+ else:
+ encoder_attention_mask = None
+
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ past_key_values=past_key_values,
+ return_dict=return_dict,
+ **kwargs_decoder,
+ )
+
+ # Compute the loss independently from the decoder (as some decoders shift the logits internally)
+ loss = None
+ if labels is not None:
+ logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.reshape(-1))
+
+ if not return_dict:
+ if loss is not None:
+ return (loss,) + decoder_outputs + encoder_outputs
+ else:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=decoder_outputs.logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_hidden_states,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
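+ # shift_tokens_right prepends `decoder_start_token_id` and drops the final label token,
+ # e.g. labels [y1, y2, y3] -> decoder inputs [start, y1, y2]; any -100s are replaced by `pad_token_id`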
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
+ ):
+ decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
+ decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
+ input_dict = {
+ "attention_mask": attention_mask,
+ "decoder_attention_mask": decoder_attention_mask,
+ "decoder_input_ids": decoder_inputs["input_ids"],
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": decoder_inputs["past_key_values"],
+ "use_cache": use_cache,
+ }
+ return input_dict
+
+ def resize_token_embeddings(self, *args, **kwargs):
+ raise NotImplementedError(
+ "Resizing the embedding layers via the SpeechEncoderDecoderModel directly is not supported. Please use the"
+ " respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))"
+ )
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ # apply decoder cache reordering here
+ return self.decoder._reorder_cache(past_key_values, beam_idx)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..663b6d41aba605b98e97509cd7dbc4b0acf001f7
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__init__.py
@@ -0,0 +1,75 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_videomae"] = [
+ "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "VideoMAEForPreTraining",
+ "VideoMAEModel",
+ "VideoMAEPreTrainedModel",
+ "VideoMAEForVideoClassification",
+ ]
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_videomae"] = ["VideoMAEFeatureExtractor"]
+ _import_structure["image_processing_videomae"] = ["VideoMAEImageProcessor"]
+
+if TYPE_CHECKING:
+ from .configuration_videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_videomae import (
+ VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST,
+ VideoMAEForPreTraining,
+ VideoMAEForVideoClassification,
+ VideoMAEModel,
+ VideoMAEPreTrainedModel,
+ )
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_videomae import VideoMAEFeatureExtractor
+ from .image_processing_videomae import VideoMAEImageProcessor
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
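+# With the `_LazyModule` indirection above, submodules such as `modeling_videomae` are only
+# imported on first attribute access (e.g. `from transformers.models.videomae import VideoMAEModel`).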
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f561f40cd12076d9ccf838c8fa98eeca46ba35b
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bd3058c14e8bf712f4fada5edebb1fd5202f837
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/configuration_videomae.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/convert_videomae_to_pytorch.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/convert_videomae_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0619fe80c6ffdaf7ab1d15efe16c2707ea98f932
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/convert_videomae_to_pytorch.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11544b7f18a1215b67bad37c0bfec5ff5dce10a6
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/feature_extraction_videomae.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9f9aa22f14dba2ab5f4a18e79d728c57a2c0fa7
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/image_processing_videomae.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e35e01bb89a22e66f3f72cf92e81eab9061e1d2c
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/__pycache__/modeling_videomae.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/configuration_videomae.py b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/configuration_videomae.py
new file mode 100644
index 0000000000000000000000000000000000000000..61bfe1d6a890756c20011a37d12373155ae1b15f
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/configuration_videomae.py
@@ -0,0 +1,149 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" VideoMAE model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "MCG-NJU/videomae-base": "https://huggingface.co/MCG-NJU/videomae-base/resolve/main/config.json",
+}
+
+
+class VideoMAEConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`VideoMAEModel`]. It is used to instantiate a
+ VideoMAE model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the VideoMAE
+ [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ num_frames (`int`, *optional*, defaults to 16):
+ The number of frames in each video.
+ tubelet_size (`int`, *optional*, defaults to 2):
+ The temporal size of each tubelet, i.e. the number of frames grouped into one patch along the time axis.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ use_mean_pooling (`bool`, *optional*, defaults to `True`):
+ Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token.
+ decoder_num_attention_heads (`int`, *optional*, defaults to 6):
+ Number of attention heads for each attention layer in the decoder.
+ decoder_hidden_size (`int`, *optional*, defaults to 384):
+ Dimensionality of the decoder.
+ decoder_num_hidden_layers (`int`, *optional*, defaults to 4):
+ Number of hidden layers in the decoder.
+ decoder_intermediate_size (`int`, *optional*, defaults to 1536):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder.
+ norm_pix_loss (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the target patch pixels.
+
+ Example:
+
+ ```python
+ >>> from transformers import VideoMAEConfig, VideoMAEModel
+
+ >>> # Initializing a VideoMAE videomae-base style configuration
+ >>> configuration = VideoMAEConfig()
+
+ >>> # Randomly initializing a model from the configuration
+ >>> model = VideoMAEModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "videomae"
+
+ def __init__(
+ self,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ num_frames=16,
+ tubelet_size=2,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ qkv_bias=True,
+ use_mean_pooling=True,
+ decoder_num_attention_heads=6,
+ decoder_hidden_size=384,
+ decoder_num_hidden_layers=4,
+ decoder_intermediate_size=1536,
+ norm_pix_loss=True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_frames = num_frames
+ self.tubelet_size = tubelet_size
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.qkv_bias = qkv_bias
+ self.use_mean_pooling = use_mean_pooling
+
+ self.decoder_num_attention_heads = decoder_num_attention_heads
+ self.decoder_hidden_size = decoder_hidden_size
+ self.decoder_num_hidden_layers = decoder_num_hidden_layers
+ self.decoder_intermediate_size = decoder_intermediate_size
+ self.norm_pix_loss = norm_pix_loss
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/convert_videomae_to_pytorch.py b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/convert_videomae_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98160a6bb82bbdc96f164455fee1b1b2c13992a
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/convert_videomae_to_pytorch.py
@@ -0,0 +1,324 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE"""
+
+import argparse
+import json
+
+import gdown
+import numpy as np
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import (
+ VideoMAEConfig,
+ VideoMAEForPreTraining,
+ VideoMAEForVideoClassification,
+ VideoMAEImageProcessor,
+)
+
+
+def get_videomae_config(model_name):
+ config = VideoMAEConfig()
+
+ set_architecture_configs(model_name, config)
+
+ if "finetuned" not in model_name:
+ config.use_mean_pooling = False
+
+ if "finetuned" in model_name:
+ repo_id = "huggingface/label-files"
+ if "kinetics" in model_name:
+ config.num_labels = 400
+ filename = "kinetics400-id2label.json"
+ elif "ssv2" in model_name:
+ config.num_labels = 174
+ filename = "something-something-v2-id2label.json"
+ else:
+ raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ return config
+
+
+def set_architecture_configs(model_name, config):
+ if "small" in model_name:
+ config.hidden_size = 384
+ config.intermediate_size = 1536
+ config.num_hidden_layers = 12
+ config.num_attention_heads = 16
+ config.decoder_num_hidden_layers = 12
+ config.decoder_num_attention_heads = 3
+ config.decoder_hidden_size = 192
+ config.decoder_intermediate_size = 768
+ elif "large" in model_name:
+ config.hidden_size = 1024
+ config.intermediate_size = 4096
+ config.num_hidden_layers = 24
+ config.num_attention_heads = 16
+ config.decoder_num_hidden_layers = 12
+ config.decoder_num_attention_heads = 8
+ config.decoder_hidden_size = 512
+ config.decoder_intermediate_size = 2048
+ elif "huge" in model_name:
+ config.hidden_size = 1280
+ config.intermediate_size = 5120
+ config.num_hidden_layers = 32
+ config.num_attention_heads = 16
+ config.decoder_num_hidden_layers = 12
+ config.decoder_num_attention_heads = 8
+ config.decoder_hidden_size = 640
+ config.decoder_intermediate_size = 2560
+ elif "base" not in model_name:
+ raise ValueError('Model name should include either "small", "base", "large", or "huge"')
+
+
+def rename_key(name):
+ if "encoder." in name:
+ name = name.replace("encoder.", "")
+ if "cls_token" in name:
+ name = name.replace("cls_token", "videomae.embeddings.cls_token")
+ if "decoder_pos_embed" in name:
+ name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
+ if "pos_embed" in name and "decoder" not in name:
+ name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
+ if "patch_embed.proj" in name:
+ name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
+ if "patch_embed.norm" in name:
+ name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
+ if "decoder.blocks" in name:
+ name = name.replace("decoder.blocks", "decoder.decoder_layers")
+ if "blocks" in name:
+ name = name.replace("blocks", "videomae.encoder.layer")
+ if "attn.proj" in name:
+ name = name.replace("attn.proj", "attention.output.dense")
+ if "attn" in name and "bias" not in name:
+ name = name.replace("attn", "attention.self")
+ if "attn" in name:
+ name = name.replace("attn", "attention.attention")
+ if "norm1" in name:
+ name = name.replace("norm1", "layernorm_before")
+ if "norm2" in name:
+ name = name.replace("norm2", "layernorm_after")
+ if "mlp.fc1" in name:
+ name = name.replace("mlp.fc1", "intermediate.dense")
+ if "mlp.fc2" in name:
+ name = name.replace("mlp.fc2", "output.dense")
+ if "decoder_embed" in name:
+ name = name.replace("decoder_embed", "decoder.decoder_embed")
+ if "decoder_norm" in name:
+ name = name.replace("decoder_norm", "decoder.decoder_norm")
+ if "decoder_pred" in name:
+ name = name.replace("decoder_pred", "decoder.decoder_pred")
+ if "norm.weight" in name and "decoder" not in name and "fc" not in name:
+ name = name.replace("norm.weight", "videomae.layernorm.weight")
+ if "norm.bias" in name and "decoder" not in name and "fc" not in name:
+ name = name.replace("norm.bias", "videomae.layernorm.bias")
+ if "head" in name and "decoder" not in name:
+ name = name.replace("head", "classifier")
+
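+ # e.g. (illustrative) "blocks.0.mlp.fc1.weight" -> "videomae.encoder.layer.0.intermediate.dense.weight"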
+ return name
+
+
+def convert_state_dict(orig_state_dict, config):
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if key.startswith("encoder."):
+ key = key.replace("encoder.", "")
+
+ if "qkv" in key:
+ key_split = key.split(".")
+ if key.startswith("decoder.blocks"):
+ dim = config.decoder_hidden_size
+ layer_num = int(key_split[2])
+ prefix = "decoder.decoder_layers."
+ if "weight" in key:
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
+ else:
+ dim = config.hidden_size
+ layer_num = int(key_split[1])
+ prefix = "videomae.encoder.layer."
+ if "weight" in key:
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
+ orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
+ else:
+ orig_state_dict[rename_key(key)] = val
+
+ return orig_state_dict
+
+
+# We will verify our results on a video of eating spaghetti
+# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
+def prepare_video():
+ file = hf_hub_download(
+ repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
+ )
+ video = np.load(file)
+ return list(video)
+
+
+def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
+ config = get_videomae_config(model_name)
+
+ if "finetuned" in model_name:
+ model = VideoMAEForVideoClassification(config)
+ else:
+ model = VideoMAEForPreTraining(config)
+
+ # download original checkpoint, hosted on Google Drive
+ output = "pytorch_model.bin"
+ gdown.cached_download(checkpoint_url, output, quiet=False)
+ files = torch.load(output, map_location="cpu")
+ if "model" in files:
+ state_dict = files["model"]
+ else:
+ state_dict = files["module"]
+ new_state_dict = convert_state_dict(state_dict, config)
+
+ model.load_state_dict(new_state_dict)
+ model.eval()
+
+ # verify model on basic input
+ image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
+ video = prepare_video()
+ inputs = image_processor(video, return_tensors="pt")
+
+ if "finetuned" not in model_name:
+ local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
+ inputs["bool_masked_pos"] = torch.load(local_path)
+
+ outputs = model(**inputs)
+ logits = outputs.logits
+
+ model_names = [
+ "videomae-small-finetuned-kinetics",
+ "videomae-small-finetuned-ssv2",
+ # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
+ "videomae-base-short",
+ "videomae-base-short-finetuned-kinetics",
+ "videomae-base",
+ "videomae-base-finetuned-kinetics",
+ "videomae-large",
+ "videomae-large-finetuned-kinetics",
+ "videomae-huge-finetuned-kinetics",
+ # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
+ "videomae-base-short-ssv2",
+ "videomae-base-short-finetuned-ssv2",
+ "videomae-base-ssv2",
+ "videomae-base-finetuned-ssv2",
+ ]
+
+ # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
+ if model_name == "videomae-small-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
+ elif model_name == "videomae-small-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
+ elif model_name == "videomae-base":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
+ elif model_name == "videomae-base-short":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
+ # we verified the loss both for normalized and unnormalized targets for this one
+ expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
+ elif model_name == "videomae-large":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
+ elif model_name == "videomae-large-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
+ elif model_name == "videomae-huge-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
+ elif model_name == "videomae-base-short-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
+ elif model_name == "videomae-base-finetuned-kinetics":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
+ elif model_name == "videomae-base-short-ssv2":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
+ elif model_name == "videomae-base-short-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
+ elif model_name == "videomae-base-ssv2":
+ expected_shape = torch.Size([1, 1408, 1536])
+ expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
+ elif model_name == "videomae-base-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
+ else:
+ raise ValueError(f"Model name not supported. Should be one of {model_names}")
+
+ # verify logits
+ assert logits.shape == expected_shape
+ if "finetuned" in model_name:
+ assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
+ else:
+ print("Logits:", logits[0, :3, :3])
+ assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
+ print("Logits ok!")
+
+ # verify loss, if applicable
+ if model_name == "videomae-base-short":
+ loss = outputs.loss
+ assert torch.allclose(loss, expected_loss, atol=1e-4)
+ print("Loss ok!")
+
+ if pytorch_dump_folder_path is not None:
+ print(f"Saving model and image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ print("Pushing to the hub...")
+ model.push_to_hub(model_name, organization="nielsr")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
+ type=str,
+ help=(
+ "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
+ " download link."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="/Users/nielsrogge/Documents/VideoMAE/Test",
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/feature_extraction_videomae.py b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/feature_extraction_videomae.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a90d10c9c55e83711a20e29a494782b6b8415f9
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/feature_extraction_videomae.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for VideoMAE."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_videomae import VideoMAEImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use VideoMAEImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/image_processing_videomae.py b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/image_processing_videomae.py
new file mode 100644
index 0000000000000000000000000000000000000000..6df708eec3ea0459a87a220ca2f282048a2449f8
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/image_processing_videomae.py
@@ -0,0 +1,343 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for VideoMAE."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ is_valid_image,
+ to_numpy_array,
+ valid_images,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+def make_batched(videos) -> List[List[ImageInput]]:
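+ # e.g. (illustrative) a single frame -> [[frame]]; a list of frames -> [frames];
+ # a batch of videos (a list of lists of frames) is returned unchanged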
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
+ return videos
+
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
+ return [videos]
+
+ elif is_valid_image(videos):
+ return [[videos]]
+
+ raise ValueError(f"Could not make batched video from {videos}")
+
+
+class VideoMAEImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a VideoMAE image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
+ Size of the output image after resizing. The shortest edge of the image will be resized to
+ `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overriden by
+ `size` in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
+ parameter in the `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
+ in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 224}
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ self.do_resize = do_resize
+ self.size = size
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
+ have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its
+ shortest edge of length `s` while keeping the aspect ratio of the original image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size, default_to_square=False)
+ if "shortest_edge" in size:
+ output_size = get_resize_output_image_size(
+ image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
+ )
+ elif "height" in size and "width" in size:
+ output_size = (size["height"], size["width"])
+ else:
+ raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """Preprocesses a single image."""
+ if do_resize and (size is None or resample is None):
+ raise ValueError("Size and resample must be specified if do_resize is True.")
+
+ if do_center_crop and crop_size is None:
+ raise ValueError("Crop size must be specified if do_center_crop is True.")
+
+ if do_rescale and rescale_factor is None:
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
+
+ if do_normalize and (image_mean is None or image_std is None):
+ raise ValueError("Image mean and std must be specified if do_normalize is True.")
+
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+
+ if is_scaled_image(image) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_center_crop:
+ image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ return image
+
+ def preprocess(
+ self,
+ videos: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ videos (`ImageInput`):
+ Video or batch of videos to preprocess. Expects a single video (a list of frames) or a batch of
+ videos with pixel values ranging from 0 to 255. If passing in frames with pixel values between 0
+ and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after applying resize.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the image after applying the center crop.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values to the [0, 1] range.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the inferred channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ if not valid_images(videos):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ videos = make_batched(videos)
+
+ videos = [
+ [
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for img in video
+ ]
+ for video in videos
+ ]
+
+ data = {"pixel_values": videos}
+ return BatchFeature(data=data, tensor_type=return_tensors)
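+
+# Usage sketch (illustrative): VideoMAEImageProcessor()(video_frames, return_tensors="pt") returns a
+# BatchFeature whose "pixel_values" entry has shape (batch_size, num_frames, num_channels, height, width).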
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/videomae/modeling_videomae.py b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/modeling_videomae.py
new file mode 100644
index 0000000000000000000000000000000000000000..aac69b6c536be4b7fd0120479f58ab953f72ab68
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/videomae/modeling_videomae.py
@@ -0,0 +1,1097 @@
+# coding=utf-8
+# Copyright 2022 Multimedia Computing Group, Nanjing University and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch VideoMAE (masked autoencoder) model."""
+
+
+import collections.abc
+import math
+from copy import deepcopy
+from dataclasses import dataclass
+from typing import Optional, Set, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .configuration_videomae import VideoMAEConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "VideoMAEConfig"
+_CHECKPOINT_FOR_DOC = "MCG-NJU/videomae-base"
+
+VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "MCG-NJU/videomae-base",
+ # See all VideoMAE models at https://huggingface.co/models?filter=videomae
+]
+
+
+@dataclass
+class VideoMAEDecoderOutput(ModelOutput):
+ """
+ Class for VideoMAEDecoder's outputs, with potential hidden states and attentions.
+
+ Args:
+        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, tubelet_size * patch_size ** 2 * num_channels)`):
+ Pixel reconstruction logits.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class VideoMAEForPreTrainingOutput(ModelOutput):
+ """
+ Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`):
+ Pixel reconstruction loss.
+        logits (`torch.FloatTensor` of shape `(batch_size, num_masked_patches, tubelet_size * patch_size ** 2 * num_channels)`):
+ Pixel reconstruction logits.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+# sin-cos position encoding
+# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
+def get_sinusoid_encoding_table(n_position, d_hid):
+ """Sinusoid position encoding table"""
+
+ # TODO: make it with torch instead of numpy
+ def get_position_angle_vec(position):
+ return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
+
+ sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
+ sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
+ sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
+
+ return torch.FloatTensor(sinusoid_table).unsqueeze(0)
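+
+
+# A hedged sketch addressing the TODO above: the same table built with torch ops
+# only. Illustrative and unused by the model; values should match
+# get_sinusoid_encoding_table up to floating point precision.
+def _get_sinusoid_encoding_table_torch(n_position, d_hid):
+    position = torch.arange(n_position, dtype=torch.float32).unsqueeze(1)  # (n_position, 1)
+    # the exponent 2 * (j // 2) / d_hid pairs up sin/cos dimensions, as in the numpy version
+    div_term = torch.pow(10000.0, 2 * (torch.arange(d_hid) // 2) / d_hid)  # (d_hid,)
+    table = position / div_term
+    table[:, 0::2] = torch.sin(table[:, 0::2])  # dim 2i
+    table[:, 1::2] = torch.cos(table[:, 1::2])  # dim 2i+1
+    return table.unsqueeze(0)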
+
+
+class VideoMAEEmbeddings(nn.Module):
+ """
+ Construct the patch and position embeddings.
+
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.patch_embeddings = VideoMAEPatchEmbeddings(config)
+ self.num_patches = self.patch_embeddings.num_patches
+ # fixed sin-cos embedding
+ self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size)
+ self.config = config
+
+ def forward(self, pixel_values, bool_masked_pos):
+ # create patch embeddings
+ embeddings = self.patch_embeddings(pixel_values)
+
+ # add position embeddings
+ embeddings = embeddings + self.position_embeddings.type_as(embeddings).to(embeddings.device).clone().detach()
+
+ # only keep visible patches
+ # ~bool_masked_pos means visible
+ if bool_masked_pos is not None:
+ batch_size, _, num_channels = embeddings.shape
+ embeddings = embeddings[~bool_masked_pos]
+ embeddings = embeddings.reshape(batch_size, -1, num_channels)
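+            # the boolean index flattens the batch and sequence dims; the reshape above is
+            # only valid because every video in the batch has the same number of visible patches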
+
+ return embeddings
+
+
+class VideoMAEPatchEmbeddings(nn.Module):
+ """
+ Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels,
+ height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.
+
+ The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width //
+ patch_size).
+
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ image_size = config.image_size
+ patch_size = config.patch_size
+ num_channels = config.num_channels
+ hidden_size = config.hidden_size
+ num_frames = config.num_frames
+ tubelet_size = config.tubelet_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.tubelet_size = int(tubelet_size)
+ num_patches = (
+ (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
+ )
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+ self.projection = nn.Conv3d(
+ in_channels=num_channels,
+ out_channels=hidden_size,
+ kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
+ stride=(self.tubelet_size, patch_size[0], patch_size[1]),
+ )
+
+ def forward(self, pixel_values):
+ batch_size, num_frames, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ if height != self.image_size[0] or width != self.image_size[1]:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
+ )
+ # permute to (batch_size, num_channels, num_frames, height, width)
+ pixel_values = pixel_values.permute(0, 2, 1, 3, 4)
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
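+        # e.g. for pixel_values of shape (B, 16, 3, 224, 224) with tubelet_size=2, patch_size=16:
+        # permute -> (B, 3, 16, 224, 224); Conv3d -> (B, hidden_size, 8, 14, 14); flatten/transpose -> (B, 1568, hidden_size)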
+ return embeddings
+
+
+class VideoMAESelfAttention(nn.Module):
+ def __init__(self, config: VideoMAEConfig) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+
+ if config.qkv_bias:
+ self.q_bias = nn.Parameter(torch.zeros(self.all_head_size))
+ self.v_bias = nn.Parameter(torch.zeros(self.all_head_size))
+ else:
+ self.q_bias = None
+ self.v_bias = None
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
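+        # a key bias shifts every attention score of a given query by the same constant,
+        # which the softmax cancels out, so the frozen all-zero key bias used below is
+        # equivalent to (and cheaper than) a learned one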
+ k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None
+ keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias)
+ values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias)
+ queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias)
+
+ key_layer = self.transpose_for_scores(keys)
+ value_layer = self.transpose_for_scores(values)
+ query_layer = self.transpose_for_scores(queries)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->VideoMAE
+class VideoMAESelfOutput(nn.Module):
+ """
+ The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: VideoMAEConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->VideoMAE
+class VideoMAEAttention(nn.Module):
+ def __init__(self, config: VideoMAEConfig) -> None:
+ super().__init__()
+ self.attention = VideoMAESelfAttention(config)
+ self.output = VideoMAESelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->VideoMAE
+class VideoMAEIntermediate(nn.Module):
+ def __init__(self, config: VideoMAEConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->VideoMAE
+class VideoMAEOutput(nn.Module):
+ def __init__(self, config: VideoMAEConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->VideoMAE
+class VideoMAELayer(nn.Module):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config: VideoMAEConfig) -> None:
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = VideoMAEAttention(config)
+ self.intermediate = VideoMAEIntermediate(config)
+ self.output = VideoMAEOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in VideoMAE, layernorm is applied before self-attention
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states
+
+ # in VideoMAE, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+
+ # second residual connection is done here
+ layer_output = self.output(layer_output, hidden_states)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VideoMAE
+class VideoMAEEncoder(nn.Module):
+ def __init__(self, config: VideoMAEConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class VideoMAEPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = VideoMAEConfig
+ base_model_prefix = "videomae"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv3d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+VIDEOMAE_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`VideoMAEConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+VIDEOMAE_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`VideoMAEImageProcessor.__call__`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare VideoMAE Model transformer outputting raw hidden-states without any specific head on top.",
+ VIDEOMAE_START_DOCSTRING,
+)
+class VideoMAEModel(VideoMAEPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = VideoMAEEmbeddings(config)
+ self.encoder = VideoMAEEncoder(config)
+
+ if config.use_mean_pooling:
+ self.layernorm = None
+ else:
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
+ batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence
+ length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import av
+ >>> import numpy as np
+
+ >>> from transformers import AutoImageProcessor, VideoMAEModel
+ >>> from huggingface_hub import hf_hub_download
+
+ >>> np.random.seed(0)
+
+
+ >>> def read_video_pyav(container, indices):
+ ... '''
+ ... Decode the video with PyAV decoder.
+ ... Args:
+ ... container (`av.container.input.InputContainer`): PyAV container.
+ ... indices (`List[int]`): List of frame indices to decode.
+ ... Returns:
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
+ ... '''
+ ... frames = []
+ ... container.seek(0)
+ ... start_index = indices[0]
+ ... end_index = indices[-1]
+ ... for i, frame in enumerate(container.decode(video=0)):
+ ... if i > end_index:
+ ... break
+ ... if i >= start_index and i in indices:
+ ... frames.append(frame)
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+ ... '''
+ ... Sample a given number of frame indices from the video.
+ ... Args:
+ ... clip_len (`int`): Total number of frames to sample.
+ ... frame_sample_rate (`int`): Sample every n-th frame.
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
+ ... Returns:
+ ... indices (`List[int]`): List of sampled frame indices
+ ... '''
+ ... converted_len = int(clip_len * frame_sample_rate)
+ ... end_idx = np.random.randint(converted_len, seg_len)
+ ... start_idx = end_idx - converted_len
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+ ... return indices
+
+
+ >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
+ >>> file_path = hf_hub_download(
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
+ ... )
+ >>> container = av.open(file_path)
+
+ >>> # sample 16 frames
+ >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
+ >>> video = read_video_pyav(container, indices)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
+ >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base")
+
+ >>> # prepare video for the model
+ >>> inputs = image_processor(list(video), return_tensors="pt")
+
+ >>> # forward pass
+ >>> outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 1568, 768]
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Prepare head mask if needed
+        # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ if self.layernorm is not None:
+ sequence_output = self.layernorm(sequence_output)
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class VideoMAEDecoder(nn.Module):
+ def __init__(self, config, num_patches):
+ super().__init__()
+
+ decoder_num_labels = config.num_channels * config.tubelet_size * config.patch_size**2
+
+ decoder_config = deepcopy(config)
+ decoder_config.hidden_size = config.decoder_hidden_size
+ decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
+ decoder_config.num_attention_heads = config.decoder_num_attention_heads
+ decoder_config.intermediate_size = config.decoder_intermediate_size
+ self.decoder_layers = nn.ModuleList(
+ [VideoMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]
+ )
+
+ self.norm = nn.LayerNorm(config.decoder_hidden_size)
+ self.head = (
+ nn.Linear(config.decoder_hidden_size, decoder_num_labels) if decoder_num_labels > 0 else nn.Identity()
+ )
+
+ self.gradient_checkpointing = False
+ self.config = config
+
+ def forward(
+ self,
+ hidden_states,
+ return_token_num,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ # apply Transformer layers (blocks)
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ for i, layer_module in enumerate(self.decoder_layers):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ None,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, head_mask=None, output_attentions=output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if return_token_num > 0:
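+            # keep only the last return_token_num tokens: the mask tokens are appended
+            # at the end of the decoder input, and only those positions are reconstructed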
+ hidden_states = hidden_states[:, -return_token_num:]
+
+ # predictor projection
+ hidden_states = self.norm(hidden_states)
+ logits = self.head(hidden_states)
+
+ if not return_dict:
+ return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)
+ return VideoMAEDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions)
+
+
+@add_start_docstrings(
+ "The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.",
+ VIDEOMAE_START_DOCSTRING,
+)
+class VideoMAEForPreTraining(VideoMAEPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.videomae = VideoMAEModel(config)
+
+ self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False)
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
+ self.position_embeddings = get_sinusoid_encoding_table(
+ self.videomae.embeddings.num_patches, config.decoder_hidden_size
+ )
+
+ self.decoder = VideoMAEDecoder(config, num_patches=self.videomae.embeddings.num_patches)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=VideoMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ bool_masked_pos: torch.BoolTensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, VideoMAEForPreTrainingOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
+ batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) *
+ (image_size // patch_size) ** 2`.
+
+ Returns:
+
+ Examples:
+ ```python
+ >>> from transformers import AutoImageProcessor, VideoMAEForPreTraining
+ >>> import numpy as np
+ >>> import torch
+
+ >>> num_frames = 16
+ >>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224)))
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
+ >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base")
+
+ >>> pixel_values = image_processor(video, return_tensors="pt").pixel_values
+
+ >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
+ >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame
+ >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool()
+
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.videomae(
+ pixel_values,
+ bool_masked_pos=bool_masked_pos,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ sequence_output = self.encoder_to_decoder(
+ sequence_output
+ ) # [batch_size, num_visible_patches, decoder_hidden_size]
+ batch_size, seq_len, num_channels = sequence_output.shape
+
+        # rather than unshuffling the tokens back into their original order, we keep the
+        # [visible; masked] ordering and split the position embeddings the same way, so
+        # every token is still paired with the position embedding of its original patch
+ if bool_masked_pos is None:
+ raise ValueError("One must provided a boolean mask ")
+ expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values)
+ expanded_position_embeddings = expanded_position_embeddings.to(pixel_values.device).clone().detach()
+ pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels)
+ pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels)
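+        # pos_emb_visible: (batch_size, num_visible, decoder_hidden_size),
+        # pos_emb_mask: (batch_size, num_masked, decoder_hidden_size)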
+
+ # [batch_size, num_patches, decoder_hidden_size]
+ x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1)
+
+        # [batch_size, num_masked_patches, tubelet_size * patch_size ** 2 * num_channels]
+ decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1])
+ logits = decoder_outputs.logits
+
+ loss = None
+ with torch.no_grad():
+ # calculate the labels to be predicted
+ if self.config.num_channels != 3:
+ # Can't unnormalize with default means/stds
+ frames = pixel_values
+ else:
+ # first, unnormalize the frames
+ device = pixel_values.device
+ dtype = pixel_values.dtype
+ mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None]
+ std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None]
+ frames = pixel_values * std + mean # in [0, 1]
+
+ batch_size, time, num_channels, height, width = frames.shape
+ tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size
+ if self.config.norm_pix_loss:
+ # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
+ frames = frames.view(
+ batch_size,
+ time // tubelet_size,
+ tubelet_size,
+ num_channels,
+ height // patch_size,
+ patch_size,
+ width // patch_size,
+ patch_size,
+ )
+ # step 2: move dimensions to concatenate:
+ frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
+ # step 3: concatenate:
+ frames = frames.view(
+ batch_size,
+ time // tubelet_size * height // patch_size * width // patch_size,
+ tubelet_size * patch_size * patch_size,
+ num_channels,
+ )
+ # step 4: normalize. The authors find that the mean is about 0.48 and standard deviation is about 0.08.
+ frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / (
+ frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6
+ )
+ # step 5: reshape to (batch_size, T//ts * H//ps * W//ps, ts * ps * ps * C)
+ videos_patch = frames_norm.view(
+ batch_size,
+ time // tubelet_size * height // patch_size * width // patch_size,
+ tubelet_size * patch_size * patch_size * num_channels,
+ )
+ else:
+ if self.config.num_channels != 3:
+ raise ValueError(
+ "Can't unnormalize non-RGB images. Consider setting config.norm_pix_loss to False."
+ )
+ # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
+ frames = frames.view(
+ batch_size,
+ time // tubelet_size,
+ tubelet_size,
+ num_channels,
+ height // patch_size,
+ patch_size,
+ width // patch_size,
+ patch_size,
+ )
+ # step 2: move dimensions to concatenate: (batch_size, T//ts, H//ps, W//ps, ts, ps, ps, C)
+ frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
+ # step 3: concatenate
+ videos_patch = frames.view(
+ batch_size,
+ time // tubelet_size * height // patch_size * width // patch_size,
+ tubelet_size * patch_size * patch_size * num_channels,
+ )
+
+ batch_size, _, num_channels = videos_patch.shape
+ labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels)
+
+ loss_fct = MSELoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return VideoMAEForPreTrainingOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled hidden
+    states of all tokens) e.g. for Kinetics-400.""",
+ VIDEOMAE_START_DOCSTRING,
+)
+class VideoMAEForVideoClassification(VideoMAEPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.videomae = VideoMAEModel(config)
+
+ # Classifier head
+ self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import av
+ >>> import torch
+ >>> import numpy as np
+
+ >>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification
+ >>> from huggingface_hub import hf_hub_download
+
+ >>> np.random.seed(0)
+
+
+ >>> def read_video_pyav(container, indices):
+ ... '''
+ ... Decode the video with PyAV decoder.
+ ... Args:
+ ... container (`av.container.input.InputContainer`): PyAV container.
+ ... indices (`List[int]`): List of frame indices to decode.
+ ... Returns:
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
+ ... '''
+ ... frames = []
+ ... container.seek(0)
+ ... start_index = indices[0]
+ ... end_index = indices[-1]
+ ... for i, frame in enumerate(container.decode(video=0)):
+ ... if i > end_index:
+ ... break
+ ... if i >= start_index and i in indices:
+ ... frames.append(frame)
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+ ... '''
+ ... Sample a given number of frame indices from the video.
+ ... Args:
+ ... clip_len (`int`): Total number of frames to sample.
+ ... frame_sample_rate (`int`): Sample every n-th frame.
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
+ ... Returns:
+ ... indices (`List[int]`): List of sampled frame indices
+ ... '''
+ ... converted_len = int(clip_len * frame_sample_rate)
+ ... end_idx = np.random.randint(converted_len, seg_len)
+ ... start_idx = end_idx - converted_len
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+ ... return indices
+
+
+ >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
+ >>> file_path = hf_hub_download(
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
+ ... )
+ >>> container = av.open(file_path)
+
+ >>> # sample 16 frames
+ >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
+ >>> video = read_video_pyav(container, indices)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
+ >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
+
+ >>> inputs = image_processor(list(video), return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+ ... logits = outputs.logits
+
+ >>> # model predicts one of the 400 Kinetics-400 classes
+ >>> predicted_label = logits.argmax(-1).item()
+ >>> print(model.config.id2label[predicted_label])
+ eating spaghetti
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.videomae(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ if self.fc_norm is not None:
+ sequence_output = self.fc_norm(sequence_output.mean(1))
+ else:
+ sequence_output = sequence_output[:, 0]
+
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__init__.py b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..813cba9fe17c1df2f3cef3d2a523fd93f99348f0
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__init__.py
@@ -0,0 +1,186 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_sentencepiece_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_xlm_roberta": [
+ "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "XLMRobertaConfig",
+ "XLMRobertaOnnxConfig",
+ ],
+}
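+
+# Each optional backend below registers its symbols in _import_structure only when
+# its dependency is installed; _LazyModule then imports them on first attribute access.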
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_xlm_roberta"] = [
+ "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "XLMRobertaForCausalLM",
+ "XLMRobertaForMaskedLM",
+ "XLMRobertaForMultipleChoice",
+ "XLMRobertaForQuestionAnswering",
+ "XLMRobertaForSequenceClassification",
+ "XLMRobertaForTokenClassification",
+ "XLMRobertaModel",
+ "XLMRobertaPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_xlm_roberta"] = [
+ "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFXLMRobertaForCausalLM",
+ "TFXLMRobertaForMaskedLM",
+ "TFXLMRobertaForMultipleChoice",
+ "TFXLMRobertaForQuestionAnswering",
+ "TFXLMRobertaForSequenceClassification",
+ "TFXLMRobertaForTokenClassification",
+ "TFXLMRobertaModel",
+ "TFXLMRobertaPreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_xlm_roberta"] = [
+ "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "FlaxXLMRobertaForMaskedLM",
+ "FlaxXLMRobertaForCausalLM",
+ "FlaxXLMRobertaForMultipleChoice",
+ "FlaxXLMRobertaForQuestionAnswering",
+ "FlaxXLMRobertaForSequenceClassification",
+ "FlaxXLMRobertaForTokenClassification",
+ "FlaxXLMRobertaModel",
+ "FlaxXLMRobertaPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_xlm_roberta import (
+ XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ XLMRobertaConfig,
+ XLMRobertaOnnxConfig,
+ )
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_xlm_roberta import XLMRobertaTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_xlm_roberta import (
+ XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
+ XLMRobertaForCausalLM,
+ XLMRobertaForMaskedLM,
+ XLMRobertaForMultipleChoice,
+ XLMRobertaForQuestionAnswering,
+ XLMRobertaForSequenceClassification,
+ XLMRobertaForTokenClassification,
+ XLMRobertaModel,
+ XLMRobertaPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_xlm_roberta import (
+ TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFXLMRobertaForCausalLM,
+ TFXLMRobertaForMaskedLM,
+ TFXLMRobertaForMultipleChoice,
+ TFXLMRobertaForQuestionAnswering,
+ TFXLMRobertaForSequenceClassification,
+ TFXLMRobertaForTokenClassification,
+ TFXLMRobertaModel,
+ TFXLMRobertaPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_xlm_roberta import (
+ FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
+ FlaxXLMRobertaForCausalLM,
+ FlaxXLMRobertaForMaskedLM,
+ FlaxXLMRobertaForMultipleChoice,
+ FlaxXLMRobertaForQuestionAnswering,
+ FlaxXLMRobertaForSequenceClassification,
+ FlaxXLMRobertaForTokenClassification,
+ FlaxXLMRobertaModel,
+ FlaxXLMRobertaPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/__init__.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..87c67702cbbf07cd187916bfa358b8c3138774f2
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/configuration_xlm_roberta.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/configuration_xlm_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f0d630f4487cf0b4c6d57ca95d4ff4ddca1d1685
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/configuration_xlm_roberta.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_flax_xlm_roberta.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_flax_xlm_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8893fc17ef1b3f7e7b7bb5ebf9e431114ea4a496
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_flax_xlm_roberta.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_tf_xlm_roberta.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_tf_xlm_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99201853f85f408c355a7f9620fd0a29935e6f4a
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_tf_xlm_roberta.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_xlm_roberta.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_xlm_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..324267539e53424b76c9843961ec0b24caec577e
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/modeling_xlm_roberta.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/tokenization_xlm_roberta.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/tokenization_xlm_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6796f6be9beb1a1e77e258a512c57e00646cd3a4
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/tokenization_xlm_roberta.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/tokenization_xlm_roberta_fast.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/tokenization_xlm_roberta_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..693cfc8b7c8e1a749a2094773a1a40710abdc6c8
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/__pycache__/tokenization_xlm_roberta_fast.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/configuration_xlm_roberta.py b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/configuration_xlm_roberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..517b751f4220031463f86c08bbd3e2005d79922c
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/configuration_xlm_roberta.py
@@ -0,0 +1,170 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XLM-RoBERTa configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
+ "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
+ "xlm-roberta-large-finetuned-conll02-dutch": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
+ ),
+ "xlm-roberta-large-finetuned-conll02-spanish": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
+ ),
+ "xlm-roberta-large-finetuned-conll03-english": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
+ ),
+ "xlm-roberta-large-finetuned-conll03-german": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
+ ),
+}
+
+
+class XLMRobertaConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`XLMRobertaModel`] or a [`TFXLMRobertaModel`]. It
+    is used to instantiate an XLM-RoBERTa model according to the specified arguments, defining the model architecture.
+    Instantiating a configuration with the defaults will yield a similar configuration to that of the XLM-RoBERTa
+ [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the XLM-RoBERTa model. Defines the number of different tokens that can be represented by
+            the `input_ids` passed when calling [`XLMRobertaModel`] or [`TFXLMRobertaModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`XLMRobertaModel`] or
+ [`TFXLMRobertaModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ is_decoder (`bool`, *optional*, defaults to `False`):
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ classifier_dropout (`float`, *optional*):
+ The dropout ratio for the classification head.
+
+ Examples:
+
+ ```python
+ >>> from transformers import XLMRobertaConfig, XLMRobertaModel
+
+ >>> # Initializing a XLM-RoBERTa xlm-roberta-base style configuration
+ >>> configuration = XLMRobertaConfig()
+
+ >>> # Initializing a model (with random weights) from the xlm-roberta-base style configuration
+ >>> model = XLMRobertaModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "xlm-roberta"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ position_embedding_type="absolute",
+ use_cache=True,
+ classifier_dropout=None,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+ self.classifier_dropout = classifier_dropout
+
+
+# Copied from transformers.models.roberta.configuration_roberta.RobertaOnnxConfig with Roberta->XLMRoberta
+class XLMRobertaOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ]
+ )
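+
+
+# Hedged usage sketch (assumes the generic OnnxConfig(config, task="default")
+# constructor; treat the snippet as illustrative):
+#
+#     onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig())
+#     print(onnx_config.inputs)
+#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
+#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])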
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8247b3f28de3904ed7903905a96f6aa41095993
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py
@@ -0,0 +1,1505 @@
+# coding=utf-8
+# Copyright 2022 Facebook AI Research and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Flax XLM-RoBERTa model."""
+
+from typing import Callable, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen import partitioning as nn_partitioning
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
+ FlaxBaseModelOutputWithPooling,
+ FlaxBaseModelOutputWithPoolingAndCrossAttentions,
+ FlaxCausalLMOutputWithCrossAttentions,
+ FlaxMaskedLMOutput,
+ FlaxMultipleChoiceModelOutput,
+ FlaxQuestionAnsweringModelOutput,
+ FlaxSequenceClassifierOutput,
+ FlaxTokenClassifierOutput,
+)
+from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_xlm_roberta import XLMRobertaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "xlm-roberta-base"
+_CONFIG_FOR_DOC = "XLMRobertaConfig"
+
+remat = nn_partitioning.remat
+
+FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "xlm-roberta-base",
+ "xlm-roberta-large",
+ # See all XLM-RoBERTa models at https://huggingface.co/models?filter=xlm-roberta
+]
+
+
+# Copied from transformers.models.roberta.modeling_flax_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: jnp.ndarray
+ padding_idx: int
+
+ Returns: jnp.ndarray
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = (input_ids != padding_idx).astype("i4")
+
+ if mask.ndim > 2:
+ mask = mask.reshape((-1, mask.shape[-1]))
+ incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask
+ incremental_indices = incremental_indices.reshape(input_ids.shape)
+ else:
+ incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask
+
+ return incremental_indices.astype("i4") + padding_idx
+
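+# Worked example (editor's addition, not upstream code): with padding_idx=1,
+# input_ids [[0, 5, 6, 1, 1]] gives mask [[1, 1, 1, 0, 0]], incremental indices
+# [[1, 2, 3, 0, 0]], and position ids [[2, 3, 4, 1, 1]]: real tokens are
+# numbered from padding_idx + 1 while padding positions keep padding_idx.
+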
+
+XLM_ROBERTA_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`XLMRobertaConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLM_ROBERTA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ head_mask (`numpy.ndarray` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->XLMRoberta
+class FlaxXLMRobertaEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.word_embeddings = nn.Embed(
+ self.config.vocab_size,
+ self.config.hidden_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.position_embeddings = nn.Embed(
+ self.config.max_position_embeddings,
+ self.config.hidden_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.token_type_embeddings = nn.Embed(
+ self.config.type_vocab_size,
+ self.config.hidden_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
+ # Embed
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
+
+ # Sum all embeddings
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
+
+ # Layer Norm
+ hidden_states = self.LayerNorm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->XLMRoberta
+class FlaxXLMRobertaSelfAttention(nn.Module):
+ config: XLMRobertaConfig
+ causal: bool = False
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.head_dim = self.config.hidden_size // self.config.num_attention_heads
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
+ raise ValueError(
+ f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
+ f": {self.config.num_attention_heads}"
+ )
+
+ self.query = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.key = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.value = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+
+ if self.causal:
+ self.causal_mask = make_causal_mask(
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
+ )
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
+
+ @nn.compact
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
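+ # Editor's note (not upstream code): `cached_key` / `cached_value` are created
+ # once with shape (*batch_dims, max_length, num_heads, head_dim); each decoding
+ # step writes its new key/value slice at `cache_index` and then advances the
+ # index by the number of query positions processed in that step.
+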
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ key_value_states: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic=True,
+ output_attentions: bool = False,
+ ):
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ batch_size = hidden_states.shape[0]
+
+ # get query proj
+ query_states = self.query(hidden_states)
+ # get key, value proj
+ if is_cross_attention:
+ # cross_attentions
+ key_states = self.key(key_value_states)
+ value_states = self.value(key_value_states)
+ else:
+ # self_attention
+ key_states = self.key(hidden_states)
+ value_states = self.value(hidden_states)
+
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # handle cache prepare causal attention mask
+ if self.causal:
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
+ if self.has_variable("cache", "cached_key"):
+ mask_shift = self.variables["cache"]["cache_index"]
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_mask = lax.dynamic_slice(
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
+ )
+ else:
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
+
+ # combine masks if needed
+ if attention_mask is not None and self.causal:
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
+ attention_mask = combine_masks(attention_mask, causal_mask)
+ elif self.causal:
+ attention_mask = causal_mask
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+ # Convert the boolean attention mask to an attention bias.
+ if attention_mask is not None:
+ # attention mask in the form of attention bias
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+ else:
+ attention_bias = None
+
+ dropout_rng = None
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.config.attention_probs_dropout_prob,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
+ # Mask heads if we want to
+ if layer_head_mask is not None:
+ attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
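+ # Editor's comment: attn_weights is (batch, num_heads, q_length, kv_length);
+ # the einsum and reshape above merge heads back to (batch, q_length, hidden_size).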
+
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->XLMRoberta
+class FlaxXLMRobertaSelfOutput(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->XLMRoberta
+class FlaxXLMRobertaAttention(nn.Module):
+ config: XLMRobertaConfig
+ causal: bool = False
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.self = FlaxXLMRobertaSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
+ self.output = FlaxXLMRobertaSelfOutput(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ key_value_states=None,
+ init_cache=False,
+ deterministic=True,
+ output_attentions: bool = False,
+ ):
+ # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
+ # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
+ # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
+ attn_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=layer_head_mask,
+ key_value_states=key_value_states,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+ attn_output = attn_outputs[0]
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_outputs[1],)
+
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->XLMRoberta
+class FlaxXLMRobertaIntermediate(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.intermediate_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.activation = ACT2FN[self.config.hidden_act]
+
+ def __call__(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->XLMRoberta
+class FlaxXLMRobertaOutput(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.LayerNorm(hidden_states + attention_output)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->XLMRoberta
+class FlaxXLMRobertaLayer(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.attention = FlaxXLMRobertaAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
+ self.intermediate = FlaxXLMRobertaIntermediate(self.config, dtype=self.dtype)
+ self.output = FlaxXLMRobertaOutput(self.config, dtype=self.dtype)
+ if self.config.add_cross_attention:
+ self.crossattention = FlaxXLMRobertaAttention(self.config, causal=False, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ ):
+ # Self Attention
+ attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=layer_head_mask,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+ attention_output = attention_outputs[0]
+
+ # Cross-Attention Block
+ if encoder_hidden_states is not None:
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=layer_head_mask,
+ key_value_states=encoder_hidden_states,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+
+ hidden_states = self.intermediate(attention_output)
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attention_outputs[1],)
+ if encoder_hidden_states is not None:
+ outputs += (cross_attention_outputs[1],)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->XLMRoberta
+class FlaxXLMRobertaLayerCollection(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ if self.gradient_checkpointing:
+ FlaxXLMRobertaCheckpointLayer = remat(FlaxXLMRobertaLayer, static_argnums=(5, 6, 7))
+ self.layers = [
+ FlaxXLMRobertaCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.num_hidden_layers)
+ ]
+ else:
+ self.layers = [
+ FlaxXLMRobertaLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.num_hidden_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+
+ # Check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.shape[0] != len(self.layers):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
+ f"{head_mask.shape[0]}."
+ )
+
+ for i, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = layer(
+ hidden_states,
+ attention_mask,
+ head_mask[i] if head_mask is not None else None,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ init_cache,
+ deterministic,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->XLMRoberta
+class FlaxXLMRobertaEncoder(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.layer = FlaxXLMRobertaLayerCollection(
+ self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return self.layer(
+ hidden_states,
+ attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->XLMRoberta
+class FlaxXLMRobertaPooler(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+
+ def __call__(self, hidden_states):
+ cls_hidden_state = hidden_states[:, 0]
+ cls_hidden_state = self.dense(cls_hidden_state)
+ return nn.tanh(cls_hidden_state)
+
+
+# Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaLMHead with Roberta->XLMRoberta
+class FlaxXLMRobertaLMHead(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.decoder = nn.Dense(
+ self.config.vocab_size,
+ dtype=self.dtype,
+ use_bias=False,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
+
+ def __call__(self, hidden_states, shared_embedding=None):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = ACT2FN["gelu"](hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+
+ if shared_embedding is not None:
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ hidden_states = self.decoder(hidden_states)
+
+ bias = jnp.asarray(self.bias, self.dtype)
+ hidden_states += bias
+ return hidden_states
+
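+ # Editor's note: when `shared_embedding` is passed (tied word embeddings), the
+ # decoder Dense layer is applied with the transposed embedding matrix bound as
+ # its kernel, so the vocabulary projection reuses the input embedding weights
+ # instead of a separately learned output matrix.
+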
+
+# Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaClassificationHead with Roberta->XLMRoberta
+class FlaxXLMRobertaClassificationHead(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ classifier_dropout = (
+ self.config.classifier_dropout
+ if self.config.classifier_dropout is not None
+ else self.config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(rate=classifier_dropout)
+ self.out_proj = nn.Dense(
+ self.config.num_labels,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+
+ def __call__(self, hidden_states, deterministic=True):
+ hidden_states = hidden_states[:, 0, :] # take token (equiv. to [CLS])
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = nn.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.out_proj(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaPreTrainedModel with Roberta->XLMRoberta, roberta->xlm-roberta, ROBERTA->XLM_ROBERTA
+class FlaxXLMRobertaPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLMRobertaConfig
+ base_model_prefix = "xlm-roberta"
+
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: XLMRobertaConfig,
+ input_shape: Tuple = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ gradient_checkpointing: bool = False,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing
+ def enable_gradient_checkpointing(self):
+ self._module = self.module_class(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=True,
+ )
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ token_type_ids = jnp.ones_like(input_ids)
+ position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)
+ attention_mask = jnp.ones_like(input_ids)
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ if self.config.add_cross_attention:
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
+ encoder_attention_mask = attention_mask
+ module_init_outputs = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ return_dict=False,
+ )
+ else:
+ module_init_outputs = self.module.init(
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
+ )
+
+ random_params = module_init_outputs["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
+ def init_cache(self, batch_size, max_length):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ """
+ # init input variables to retrieve cache
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
+ )
+ return unfreeze(init_variables["cache"])
+
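+ # Editor's sketch: `init_cache` is what `prepare_inputs_for_generation` (see
+ # FlaxXLMRobertaForCausalLM below) calls to pre-allocate the key/value cache,
+ # e.g. `past_key_values = model.init_cache(batch_size=1, max_length=32)`; the
+ # result is then passed back in as `past_key_values` on subsequent calls.
+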
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ token_type_ids=None,
+ position_ids=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ params: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ past_key_values: dict = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # init input tensors if not passed
+ if token_type_ids is None:
+ token_type_ids = jnp.zeros_like(input_ids)
+
+ if position_ids is None:
+ position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ if head_mask is None:
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ if self.config.add_cross_attention:
+ # If past_key_values are passed, the cache is already initialized and a private flag `init_cache` has to be
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
+ # changed by the FlaxXLMRobertaAttention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ outputs = self.module.apply(
+ inputs,
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ head_mask=jnp.array(head_mask, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ deterministic=not train,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ rngs=rngs,
+ mutable=mutable,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past_key_values = outputs
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past_key_values = outputs
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
+
+ else:
+ outputs = self.module.apply(
+ inputs,
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ head_mask=jnp.array(head_mask, dtype="i4"),
+ deterministic=not train,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ rngs=rngs,
+ )
+
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertModule with Bert->XLMRoberta
+class FlaxXLMRobertaModule(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ add_pooling_layer: bool = True
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.embeddings = FlaxXLMRobertaEmbeddings(self.config, dtype=self.dtype)
+ self.encoder = FlaxXLMRobertaEncoder(
+ self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.pooler = FlaxXLMRobertaPooler(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ head_mask: Optional[jnp.ndarray] = None,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # make sure `token_type_ids` is correctly initialized when not passed
+ if token_type_ids is None:
+ token_type_ids = jnp.zeros_like(input_ids)
+
+ # make sure `position_ids` is correctly initialized when not passed
+ if position_ids is None:
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ hidden_states = self.embeddings(
+ input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
+ )
+ outputs = self.encoder(
+ hidden_states,
+ attention_mask,
+ head_mask=head_mask,
+ deterministic=deterministic,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = outputs[0]
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
+
+ if not return_dict:
+ # if pooled is None, don't return it
+ if pooled is None:
+ return (hidden_states,) + outputs[1:]
+ return (hidden_states, pooled) + outputs[1:]
+
+ return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ pooler_output=pooled,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare XLM RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
+ XLM_ROBERTA_START_DOCSTRING,
+)
+class FlaxXLMRobertaModel(FlaxXLMRobertaPreTrainedModel):
+ module_class = FlaxXLMRobertaModule
+
+
+append_call_sample_docstring(FlaxXLMRobertaModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)
+
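+# Minimal usage sketch (editor's addition; assumes Flax weights are available
+# for the checkpoint, as they are for "xlm-roberta-base"):
+#
+# from transformers import AutoTokenizer, FlaxXLMRobertaModel
+#
+# tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
+# model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
+# inputs = tokenizer("Hello world!", return_tensors="np")
+# outputs = model(**inputs)
+# outputs.last_hidden_state.shape # (1, sequence_length, 768)
+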
+
+# Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLMModule with Roberta->XLMRoberta
+class FlaxXLMRobertaForMaskedLMModule(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.roberta = FlaxXLMRobertaModule(
+ config=self.config,
+ add_pooling_layer=False,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.lm_head = FlaxXLMRobertaLMHead(config=self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.roberta(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ else:
+ shared_embedding = None
+
+ # Compute the prediction scores
+ logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxMaskedLMOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings("""XLM RoBERTa Model with a `language modeling` head on top.""", XLM_ROBERTA_START_DOCSTRING)
+class FlaxXLMRobertaForMaskedLM(FlaxXLMRobertaPreTrainedModel):
+ module_class = FlaxXLMRobertaForMaskedLMModule
+
+
+append_call_sample_docstring(
+ FlaxXLMRobertaForMaskedLM,
+ _CHECKPOINT_FOR_DOC,
+ FlaxBaseModelOutputWithPooling,
+ _CONFIG_FOR_DOC,
+ mask="",
+)
+
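+# Mask-filling sketch (editor's addition, illustrative only):
+#
+# from transformers import AutoTokenizer, FlaxXLMRobertaForMaskedLM
+#
+# tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
+# model = FlaxXLMRobertaForMaskedLM.from_pretrained("xlm-roberta-base")
+# inputs = tokenizer("The capital of France is <mask>.", return_tensors="np")
+# logits = model(**inputs).logits
+# mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).argmax()
+# tokenizer.decode(int(logits[0, mask_pos].argmax())) # e.g. "Paris"
+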
+
+# Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassificationModule with Roberta->XLMRoberta
+class FlaxXLMRobertaForSequenceClassificationModule(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.roberta = FlaxXLMRobertaModule(
+ config=self.config,
+ dtype=self.dtype,
+ add_pooling_layer=False,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.classifier = FlaxXLMRobertaClassificationHead(config=self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.roberta(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output, deterministic=deterministic)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxSequenceClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Roberta Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+class FlaxXLMRobertaForSequenceClassification(FlaxXLMRobertaPreTrainedModel):
+ module_class = FlaxXLMRobertaForSequenceClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxXLMRobertaForSequenceClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxSequenceClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
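+# Editor's note: the sequence-classification module is built with
+# add_pooling_layer=False; FlaxXLMRobertaClassificationHead reads the first
+# (<s>) token itself, and a head loaded from a plain "xlm-roberta-base"
+# checkpoint is freshly initialized until fine-tuned.
+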
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForMultipleChoiceModule with Bert->XLMRoberta, with self.bert->self.roberta
+class FlaxXLMRobertaForMultipleChoiceModule(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.roberta = FlaxXLMRobertaModule(
+ config=self.config,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+ self.classifier = nn.Dense(1, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ num_choices = input_ids.shape[1]
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
+
+ # Model
+ outputs = self.roberta(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+
+ reshaped_logits = logits.reshape(-1, num_choices)
+
+ if not return_dict:
+ return (reshaped_logits,) + outputs[2:]
+
+ return FlaxMultipleChoiceModelOutput(
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+class FlaxXLMRobertaForMultipleChoice(FlaxXLMRobertaPreTrainedModel):
+ module_class = FlaxXLMRobertaForMultipleChoiceModule
+
+
+overwrite_call_docstring(
+ FlaxXLMRobertaForMultipleChoice, XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+)
+append_call_sample_docstring(
+ FlaxXLMRobertaForMultipleChoice,
+ _CHECKPOINT_FOR_DOC,
+ FlaxMultipleChoiceModelOutput,
+ _CONFIG_FOR_DOC,
+)
+
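+# Editor's note: multiple-choice inputs arrive as (batch_size, num_choices,
+# sequence_length); the module flattens them to (batch_size * num_choices,
+# sequence_length), scores each choice with a single-unit Dense head on the
+# pooled output, and reshapes the logits back to (batch_size, num_choices).
+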
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForTokenClassificationModule with Bert->XLMRoberta, with self.bert->self.roberta
+class FlaxXLMRobertaForTokenClassificationModule(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.roberta = FlaxXLMRobertaModule(
+ config=self.config,
+ dtype=self.dtype,
+ add_pooling_layer=False,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ classifier_dropout = (
+ self.config.classifier_dropout
+ if self.config.classifier_dropout is not None
+ else self.config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(rate=classifier_dropout)
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.roberta(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.classifier(hidden_states)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxTokenClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+class FlaxXLMRobertaForTokenClassification(FlaxXLMRobertaPreTrainedModel):
+ module_class = FlaxXLMRobertaForTokenClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxXLMRobertaForTokenClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxTokenClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
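+# Editor's note: the token-classification head applies dropout (using
+# classifier_dropout when set, otherwise hidden_dropout_prob) followed by a
+# per-token Dense projection, so the logits have shape
+# (batch_size, sequence_length, num_labels).
+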
+
+# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForQuestionAnsweringModule with Bert->XLMRoberta, with self.bert->self.roberta
+class FlaxXLMRobertaForQuestionAnsweringModule(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.roberta = FlaxXLMRobertaModule(
+ config=self.config,
+ dtype=self.dtype,
+ add_pooling_layer=False,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.roberta(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+
+ logits = self.qa_outputs(hidden_states)
+ start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ if not return_dict:
+ return (start_logits, end_logits) + outputs[1:]
+
+ return FlaxQuestionAnsweringModelOutput(
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+class FlaxXLMRobertaForQuestionAnswering(FlaxXLMRobertaPreTrainedModel):
+ module_class = FlaxXLMRobertaForQuestionAnsweringModule
+
+
+append_call_sample_docstring(
+ FlaxXLMRobertaForQuestionAnswering,
+ _CHECKPOINT_FOR_DOC,
+ FlaxQuestionAnsweringModelOutput,
+ _CONFIG_FOR_DOC,
+)
+
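+# Span-extraction sketch (editor's addition; the QA head is randomly
+# initialized unless a fine-tuned checkpoint is loaded):
+#
+# outputs = model(**inputs)
+# start = int(outputs.start_logits[0].argmax())
+# end = int(outputs.end_logits[0].argmax())
+# answer_ids = inputs["input_ids"][0, start : end + 1]
+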
+
+# Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLMModule with Roberta->XLMRoberta
+class FlaxXLMRobertaForCausalLMModule(nn.Module):
+ config: XLMRobertaConfig
+ dtype: jnp.dtype = jnp.float32
+ gradient_checkpointing: bool = False
+
+ def setup(self):
+ self.roberta = FlaxXLMRobertaModule(
+ config=self.config,
+ add_pooling_layer=False,
+ dtype=self.dtype,
+ gradient_checkpointing=self.gradient_checkpointing,
+ )
+ self.lm_head = FlaxXLMRobertaLMHead(config=self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ token_type_ids: Optional[jnp.ndarray] = None,
+ head_mask: Optional[jnp.ndarray] = None,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # Model
+ outputs = self.roberta(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ else:
+ shared_embedding = None
+
+ # Compute the prediction scores
+ logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxCausalLMOutputWithCrossAttentions(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Roberta Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g. for
+ autoregressive tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLM with Roberta->XLMRoberta
+class FlaxXLMRobertaForCausalLM(FlaxXLMRobertaPreTrainedModel):
+ module_class = FlaxXLMRobertaForCausalLMModule
+
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
+ # initializing the cache
+ batch_size, seq_length = input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length)
+ # Note that usually one would have to put 0's in the attention_mask for positions x > input_ids.shape[-1]
+ # and x < cache_length. But since the decoder uses a causal mask, those positions are masked anyway.
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation.
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if attention_mask is not None:
+ position_ids = attention_mask.cumsum(axis=-1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
+ else:
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+ return {
+ "past_key_values": past_key_values,
+ "attention_mask": extended_attention_mask,
+ "position_ids": position_ids,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
+ return model_kwargs
+
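+ # Editor's note: during `model.generate(...)`, `prepare_inputs_for_generation`
+ # runs once to allocate the cache and build a static max-length attention mask,
+ # and `update_inputs_for_generation` runs after every decoding step to carry
+ # the updated cache forward and advance `position_ids` by one.
+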
+
+append_call_sample_docstring(
+ FlaxXLMRobertaForCausalLM,
+ _CHECKPOINT_FOR_DOC,
+ FlaxCausalLMOutputWithCrossAttentions,
+ _CONFIG_FOR_DOC,
+)
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6003f4284a580b1837b8c9d3cfed90ee8cb9fb7
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py
@@ -0,0 +1,1786 @@
+# coding=utf-8
+# Copyright 2019 Facebook AI Research and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 XLM-RoBERTa model."""
+
+
+from __future__ import annotations
+
+import math
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
+ TFCausalLMOutputWithCrossAttentions,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_xlm_roberta import XLMRobertaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "xlm-roberta-base"
+_CONFIG_FOR_DOC = "XLMRobertaConfig"
+
+TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "xlm-roberta-base",
+ "xlm-roberta-large",
+ "joeddav/xlm-roberta-large-xnli",
+ "cardiffnlp/twitter-xlm-roberta-base-sentiment",
+ # See all XLM-RoBERTa models at https://huggingface.co/models?filter=xlm-roberta
+]
+
+XLM_ROBERTA_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+ <Tip>
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+ </Tip>
+
+ Parameters:
+ config ([`XLMRobertaConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLM_ROBERTA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
+ [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input
+ IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids)
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
+            config will be used instead.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
+            used instead.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+            eager mode; in graph mode the value will always be set to `True`.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings with Roberta->XLMRoberta
+class TFXLMRobertaEmbeddings(tf.keras.layers.Layer):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.padding_idx = 1
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.initializer_range = config.initializer_range
+ self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("token_type_embeddings"):
+ self.token_type_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.config.type_vocab_size, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+ def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: tf.Tensor
+ Returns: tf.Tensor
+ """
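+        # Worked example (illustrative): with padding_idx = 1 and
+        # input_ids = [[5, 6, 1, 1]], mask is [[1, 1, 0, 0]], the cumulative sum
+        # gives [[1, 2, 0, 0]], and adding padding_idx yields
+        # position_ids = [[2, 3, 1, 1]]; padded positions stay at padding_idx.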
+ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
+
+ return incremental_indices + self.padding_idx
+
+ def call(
+ self,
+ input_ids=None,
+ position_ids=None,
+ token_type_ids=None,
+ inputs_embeds=None,
+ past_key_values_length=0,
+ training=False,
+ ):
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(
+ input_ids=input_ids, past_key_values_length=past_key_values_length
+ )
+ else:
+ position_ids = tf.expand_dims(
+ tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
+ )
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->XLMRoberta
+class TFXLMRobertaPooler(tf.keras.layers.Layer):
+ def __init__(self, config: XLMRobertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = tf.keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(inputs=first_token_tensor)
+
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->XLMRoberta
+class TFXLMRobertaSelfAttention(tf.keras.layers.Layer):
+ def __init__(self, config: XLMRobertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
+ f"of attention heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
+
+ self.query = tf.keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
+ )
+ self.key = tf.keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
+ )
+ self.value = tf.keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
+ )
+ self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
+
+ self.is_decoder = config.is_decoder
+ self.config = config
+
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ encoder_hidden_states: tf.Tensor,
+ encoder_attention_mask: tf.Tensor,
+ past_key_value: Tuple[tf.Tensor],
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ batch_size = shape_list(hidden_states)[0]
+ mixed_query_layer = self.query(inputs=hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+            # reuse the cached key/value tensors from the cross-attention
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ # (batch size, num_heads, seq_len_q, seq_len_k)
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
+ attention_scores = tf.divide(attention_scores, dk)
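+        # i.e. the standard scaled dot-product logits, scores = Q K^T / sqrt(d_head),
+        # computed per head before masking and softmax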
+
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the TFXLMRobertaModel call() function)
+ attention_scores = tf.add(attention_scores, attention_mask)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = tf.multiply(attention_probs, head_mask)
+
+ attention_output = tf.matmul(attention_probs, value_layer)
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
+
+ # (batch_size, seq_len_q, all_head_size)
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->XLMRoberta
+class TFXLMRobertaSelfOutput(tf.keras.layers.Layer):
+ def __init__(self, config: XLMRobertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = tf.keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->XLMRoberta
+class TFXLMRobertaAttention(tf.keras.layers.Layer):
+ def __init__(self, config: XLMRobertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.self_attention = TFXLMRobertaSelfAttention(config, name="self")
+ self.dense_output = TFXLMRobertaSelfOutput(config, name="output")
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(
+ self,
+ input_tensor: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ encoder_hidden_states: tf.Tensor,
+ encoder_attention_mask: tf.Tensor,
+ past_key_value: Tuple[tf.Tensor],
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ self_outputs = self.self_attention(
+ hidden_states=input_tensor,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = self.dense_output(
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
+ )
+ # add attentions (possibly with past_key_value) if we output them
+ outputs = (attention_output,) + self_outputs[1:]
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attention", None) is not None:
+ with tf.name_scope(self.self_attention.name):
+ self.self_attention.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->XLMRoberta
+class TFXLMRobertaIntermediate(tf.keras.layers.Layer):
+ def __init__(self, config: XLMRobertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = tf.keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->XLMRoberta
+class TFXLMRobertaOutput(tf.keras.layers.Layer):
+ def __init__(self, config: XLMRobertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = tf.keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->XLMRoberta
+class TFXLMRobertaLayer(tf.keras.layers.Layer):
+ def __init__(self, config: XLMRobertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFXLMRobertaAttention(config, name="attention")
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = TFXLMRobertaAttention(config, name="crossattention")
+ self.intermediate = TFXLMRobertaIntermediate(config, name="intermediate")
+ self.bert_output = TFXLMRobertaOutput(config, name="output")
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ encoder_hidden_states: tf.Tensor | None,
+ encoder_attention_mask: tf.Tensor | None,
+ past_key_value: Tuple[tf.Tensor] | None,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ input_tensor=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=self_attn_past_key_value,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ input_tensor=attention_output,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ intermediate_output = self.intermediate(hidden_states=attention_output)
+ layer_output = self.bert_output(
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
+ )
+ outputs = (layer_output,) + outputs # add attentions if we output them
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "bert_output", None) is not None:
+ with tf.name_scope(self.bert_output.name):
+ self.bert_output.build(None)
+ if getattr(self, "crossattention", None) is not None:
+ with tf.name_scope(self.crossattention.name):
+ self.crossattention.build(None)
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->XLMRoberta
+class TFXLMRobertaEncoder(tf.keras.layers.Layer):
+ def __init__(self, config: XLMRobertaConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.layer = [TFXLMRobertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ encoder_hidden_states: tf.Tensor | None,
+ encoder_attention_mask: tf.Tensor | None,
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
+ use_cache: Optional[bool],
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask[i],
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
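+                # layer_outputs[-1] is the (key, value) pair appended by the layer when
+                # is_decoder is True; the stacked tuples become the `past_key_values`
+                # consumed on the next decoding step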
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
+ )
+
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaMainLayer with Roberta->XLMRoberta
+class TFXLMRobertaMainLayer(tf.keras.layers.Layer):
+ config_class = XLMRobertaConfig
+
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.is_decoder = config.is_decoder
+
+ self.num_hidden_layers = config.num_hidden_layers
+ self.initializer_range = config.initializer_range
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+ self.encoder = TFXLMRobertaEncoder(config, name="encoder")
+ self.pooler = TFXLMRobertaPooler(config, name="pooler") if add_pooling_layer else None
+ # The embeddings must be the last declaration in order to follow the weights order
+ self.embeddings = TFXLMRobertaEmbeddings(config, name="embeddings")
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
+ def get_input_embeddings(self) -> tf.keras.layers.Layer:
+ return self.embeddings
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
+ def set_input_embeddings(self, value: tf.Variable):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
+ if not self.config.is_decoder:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+
+ if past_key_values is None:
+ past_key_values_length = 0
+ past_key_values = [None] * len(self.encoder.layer)
+ else:
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ training=training,
+ )
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is more simple than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ attention_mask_shape = shape_list(attention_mask)
+
+ mask_seq_length = seq_length + past_key_values_length
+ # Copied from `modeling_tf_t5.py`
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
+ if self.is_decoder:
+ seq_ids = tf.range(mask_seq_length)
+ causal_mask = tf.less_equal(
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
+ seq_ids[None, :, None],
+ )
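+            # Illustrative: for mask_seq_length = 3 this builds the lower-triangular
+            # pattern [[1, 0, 0], [1, 1, 0], [1, 1, 1]], so position i can attend
+            # only to positions <= i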
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
+ attention_mask_shape = shape_list(extended_attention_mask)
+ extended_attention_mask = tf.reshape(
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
+ )
+ if past_key_values[0] is not None:
+                # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
+ else:
+ extended_attention_mask = tf.reshape(
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
+ )
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
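+        # e.g. a padding mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0]: attended
+        # positions are left untouched while masked logits are pushed far below the rest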
+
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
+ if self.is_decoder and encoder_attention_mask is not None:
+            # If a 2D or 3D attention mask is provided for the cross-attention,
+            # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
+ if num_dims_encoder_attention_mask == 3:
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
+ if num_dims_encoder_attention_mask == 2:
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
+
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
+
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.config.num_hidden_layers
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (
+ sequence_output,
+ pooled_output,
+ ) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+
+
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaPreTrainedModel with Roberta->XLMRoberta
+class TFXLMRobertaPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLMRobertaConfig
+ base_model_prefix = "roberta"
+
+
+@add_start_docstrings(
+ "The bare XLM RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaModel with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class TFXLMRobertaModel(TFXLMRobertaPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.roberta = TFXLMRobertaMainLayer(config, name="roberta")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`). Set to `False` during training and `True` during generation.
+ """
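+        # Sketch of cache-assisted decoding (hypothetical tensors, not from this file;
+        # requires `config.is_decoder=True`): the first call with the full prompt
+        # returns `past_key_values`, and later calls can pass only the newest token:
+        #
+        #     out = model(input_ids, use_cache=True)
+        #     nxt = model(input_ids[:, -1:], past_key_values=out.past_key_values)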
+ outputs = self.roberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "roberta", None) is not None:
+ with tf.name_scope(self.roberta.name):
+ self.roberta.build(None)
+
+
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->XLMRoberta
+class TFXLMRobertaLMHead(tf.keras.layers.Layer):
+ """XLMRoberta Head for masked language modeling."""
+
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.dense = tf.keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.act = get_tf_activation("gelu")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+
+ def get_output_embeddings(self):
+ return self.decoder
+
+ def set_output_embeddings(self, value):
+ self.decoder.weight = value
+ self.decoder.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+
+ # project back to size of vocabulary with bias
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
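+        # `transpose_b=True` multiplies by the transposed embedding matrix: the usual
+        # weight-tying trick, where one matrix both embeds tokens and scores the vocabulary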
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
+
+@add_start_docstrings("""XLM RoBERTa Model with a `language modeling` head on top.""", XLM_ROBERTA_START_DOCSTRING)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class TFXLMRobertaForMaskedLM(TFXLMRobertaPreTrainedModel, TFMaskedLanguageModelingLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
+ self.lm_head = TFXLMRobertaLMHead(config, self.roberta.embeddings, name="lm_head")
+
+ def get_lm_head(self):
+ return self.lm_head
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.lm_head.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+        mask="<mask>",
+ expected_output="' Paris'",
+ expected_loss=0.1,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "roberta", None) is not None:
+ with tf.name_scope(self.roberta.name):
+ self.roberta.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build(None)
+
+
+@add_start_docstrings(
+ "XLM-RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.",
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForCausalLM with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class TFXLMRobertaForCausalLM(TFXLMRobertaPreTrainedModel, TFCausalLanguageModelingLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"]
+
+ def __init__(self, config: XLMRobertaConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ if not config.is_decoder:
+            logger.warning("If you want to use `TFXLMRobertaForCausalLM` as a standalone, add `is_decoder=True`.")
+
+ self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
+ self.lm_head = TFXLMRobertaLMHead(config, input_embeddings=self.roberta.embeddings, name="lm_head")
+
+ def get_lm_head(self):
+ return self.lm_head
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.lm_head.name
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = tf.ones(input_shape)
+
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ input_ids = input_ids[:, -1:]
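+            # with a cache present, earlier positions are already encoded in
+            # `past_key_values`, so only the newest token id is fed to the model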
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFCausalLMOutputWithCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
+ r"""
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`). Set to `False` during training and `True` during generation.
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
+ config.vocab_size - 1]`.
+ """
+ outputs = self.roberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+ logits = self.lm_head(hidden_states=sequence_output, training=training)
+ loss = None
+
+ if labels is not None:
+ # shift labels to the left and cut last logit token
+ shifted_logits = logits[:, :-1]
+ labels = labels[:, 1:]
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
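+            # e.g. for tokens [t0, t1, t2, t3], the logits at [t0, t1, t2] are scored
+            # against labels [t1, t2, t3]: each position predicts the next token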
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFCausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "roberta", None) is not None:
+ with tf.name_scope(self.roberta.name):
+ self.roberta.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build(None)
+
+
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaClassificationHead with Roberta->XLMRoberta
+class TFXLMRobertaClassificationHead(tf.keras.layers.Layer):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = tf.keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = tf.keras.layers.Dropout(classifier_dropout)
+ self.out_proj = tf.keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
+ )
+ self.config = config
+
+ def call(self, features, training=False):
+        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
+ x = self.dropout(x, training=training)
+ x = self.dense(x)
+ x = self.dropout(x, training=training)
+ x = self.out_proj(x)
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ XLM RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForSequenceClassification with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class TFXLMRobertaForSequenceClassification(TFXLMRobertaPreTrainedModel, TFSequenceClassificationLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
+ self.classifier = TFXLMRobertaClassificationHead(config, name="classifier")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="cardiffnlp/twitter-roberta-base-emotion",
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'optimism'",
+ expected_loss=0.08,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output, training=training)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "roberta", None) is not None:
+ with tf.name_scope(self.roberta.name):
+ self.roberta.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build(None)
+
+
+@add_start_docstrings(
+ """
+ XLM Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMultipleChoice with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class TFXLMRobertaForMultipleChoice(TFXLMRobertaPreTrainedModel, TFMultipleChoiceLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"lm_head"]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.roberta = TFXLMRobertaMainLayer(config, name="roberta")
+ self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = tf.keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(
+ XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
+            where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above.)
+ """
+
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
+ outputs = self.roberta(
+ flat_input_ids,
+ flat_attention_mask,
+ flat_token_type_ids,
+ flat_position_ids,
+ head_mask,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(pooled_output, training=training)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "roberta", None) is not None:
+ with tf.name_scope(self.roberta.name):
+ self.roberta.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
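+ # A minimal usage sketch for the multiple-choice head (illustrative; assumes the public "xlm-roberta-base"
+ # checkpoint is reachable). Each choice is encoded against the prompt and the batch is expanded to the
+ # (batch_size, num_choices, sequence_length) layout that `call()` flattens internally:
+ #
+ # >>> import tensorflow as tf
+ # >>> from transformers import AutoTokenizer, TFXLMRobertaForMultipleChoice
+ # >>> tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
+ # >>> model = TFXLMRobertaForMultipleChoice.from_pretrained("xlm-roberta-base")
+ # >>> prompt = "The capital of France is"
+ # >>> choices = ["Paris.", "Berlin."]
+ # >>> enc = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
+ # >>> inputs = {k: tf.expand_dims(v, 0) for k, v in enc.items()}  # (1, num_choices, seq_len)
+ # >>> logits = model(**inputs).logits  # (1, num_choices), one score per choice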
+
+@add_start_docstrings(
+ """
+ XLM RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForTokenClassification with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class TFXLMRobertaForTokenClassification(TFXLMRobertaPreTrainedModel, TFTokenClassificationLoss):
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = tf.keras.layers.Dropout(classifier_dropout)
+ self.classifier = tf.keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="ydshieh/roberta-large-ner-english",
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']",
+ expected_loss=0.01,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "roberta", None) is not None:
+ with tf.name_scope(self.roberta.name):
+ self.roberta.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
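+ # An illustrative usage sketch for the token-classification head, assuming the public "xlm-roberta-base"
+ # checkpoint; the classification layer is freshly initialized here, so predictions are only meaningful after
+ # fine-tuning:
+ #
+ # >>> import tensorflow as tf
+ # >>> from transformers import AutoTokenizer, TFXLMRobertaForTokenClassification
+ # >>> tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
+ # >>> model = TFXLMRobertaForTokenClassification.from_pretrained("xlm-roberta-base", num_labels=9)
+ # >>> inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
+ # >>> logits = model(**inputs).logits             # (batch_size, sequence_length, num_labels)
+ # >>> pred_ids = tf.math.argmax(logits, axis=-1)  # one label id per (sub)token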
+
+@add_start_docstrings(
+ """
+ XLM RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForQuestionAnswering with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class TFXLMRobertaForQuestionAnswering(TFXLMRobertaPreTrainedModel, TFQuestionAnsweringLoss):
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta")
+ self.qa_outputs = tf.keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="ydshieh/roberta-base-squad2",
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="' puppet'",
+ expected_loss=0.86,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "roberta", None) is not None:
+ with tf.name_scope(self.roberta.name):
+ self.roberta.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
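+
+ # An illustrative span-extraction sketch, assuming the public "xlm-roberta-base" checkpoint (the QA head is
+ # randomly initialized until fine-tuned). The answer span is read off the argmax of the start/end logits:
+ #
+ # >>> import tensorflow as tf
+ # >>> from transformers import AutoTokenizer, TFXLMRobertaForQuestionAnswering
+ # >>> tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
+ # >>> model = TFXLMRobertaForQuestionAnswering.from_pretrained("xlm-roberta-base")
+ # >>> question, context = "Who wrote the novel?", "The novel was written by Jane Austen."
+ # >>> inputs = tokenizer(question, context, return_tensors="tf")
+ # >>> outputs = model(**inputs)
+ # >>> start = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
+ # >>> end = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
+ # >>> answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])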
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_xlm_roberta.py b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_xlm_roberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..95ea2e7dca7bd16b4e65d10d309ee8656c3ae56c
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/modeling_xlm_roberta.py
@@ -0,0 +1,1579 @@
+# coding=utf-8
+# Copyright 2019 Facebook AI Research and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch XLM-RoBERTa model."""
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN, gelu
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_xlm_roberta import XLMRobertaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "xlm-roberta-base"
+_CONFIG_FOR_DOC = "XLMRobertaConfig"
+
+XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "xlm-roberta-base",
+ "xlm-roberta-large",
+ "xlm-roberta-large-finetuned-conll02-dutch",
+ "xlm-roberta-large-finetuned-conll02-spanish",
+ "xlm-roberta-large-finetuned-conll03-english",
+ "xlm-roberta-large-finetuned-conll03-german",
+ # See all XLM-RoBERTa models at https://huggingface.co/models?filter=xlm-roberta
+]
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->XLMRoberta
+class XLMRobertaEmbeddings(nn.Module):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ # End copy
+ self.padding_idx = config.pad_token_id
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+ )
+
+ def forward(
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ # If token_type_ids is not provided, default to the all-zeros buffer registered in the constructor. This usually
+ # happens when the ids are auto-generated; the registered buffer lets users trace the model without passing
+ # token_type_ids (solves issue #5664).
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
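+ # A worked sketch of the position-id scheme used above (illustrative): XLM-RoBERTa offsets real positions by
+ # `padding_idx + 1` and pins padded tokens to `padding_idx`, whose embedding row stays a zero vector. With
+ # `pad_token_id = 1`, this matches `create_position_ids_from_input_ids` for `past_key_values_length=0`:
+ #
+ # >>> import torch
+ # >>> pad = 1
+ # >>> input_ids = torch.tensor([[0, 213, 7, 2, pad, pad]])
+ # >>> mask = input_ids.ne(pad).long()
+ # >>> torch.cumsum(mask, dim=1) * mask + pad
+ # tensor([[2, 3, 4, 5, 1, 1]])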
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->XLMRoberta
+class XLMRobertaSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the XLMRobertaModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
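+ # A shape sketch for the attention math above (illustrative): `transpose_for_scores` splits the hidden axis
+ # into heads and moves the head axis forward, so the scaled dot product runs per head over the sequence:
+ #
+ # >>> import torch
+ # >>> batch, seq, hidden, heads = 2, 5, 768, 12
+ # >>> head_size = hidden // heads
+ # >>> q = torch.randn(batch, seq, hidden).view(batch, seq, heads, head_size).permute(0, 2, 1, 3)
+ # >>> scores = q @ q.transpose(-1, -2) / head_size ** 0.5
+ # >>> scores.shape  # one (seq, seq) attention map per head
+ # torch.Size([2, 12, 5, 5])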
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput with Roberta->XLMRoberta
+class XLMRobertaSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaAttention with Roberta->XLMRoberta
+class XLMRobertaAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = XLMRobertaSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = XLMRobertaSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
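+ # An illustrative head-pruning sketch: `prune_heads` above is normally reached through
+ # `PreTrainedModel.prune_heads`, which takes a {layer_index: [head_indices]} dict. Shown on a freshly
+ # initialized config (12 heads per layer by default), so no checkpoint is needed:
+ #
+ # >>> from transformers import XLMRobertaConfig, XLMRobertaModel
+ # >>> model = XLMRobertaModel(XLMRobertaConfig())
+ # >>> model.prune_heads({0: [0, 1], 2: [5]})  # drop heads 0 and 1 in layer 0, head 5 in layer 2
+ # >>> model.encoder.layer[0].attention.self.num_attention_heads
+ # 10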
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate with Roberta->XLMRoberta
+class XLMRobertaIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaOutput with Roberta->XLMRoberta
+class XLMRobertaOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaLayer with Roberta->XLMRoberta
+class XLMRobertaLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = XLMRobertaAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = XLMRobertaAttention(config, position_embedding_type="absolute")
+ self.intermediate = XLMRobertaIntermediate(config)
+ self.output = XLMRobertaOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
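+ # An illustrative sketch of the feed-forward chunking used above: `apply_chunking_to_forward` splits its
+ # inputs along `seq_len_dim` into `chunk_size_feed_forward`-sized slices and concatenates the results, which
+ # is numerically equivalent to one full pass because the feed-forward runs position-wise:
+ #
+ # >>> import torch
+ # >>> from transformers.pytorch_utils import apply_chunking_to_forward
+ # >>> ff = torch.nn.Linear(4, 4)
+ # >>> x = torch.randn(2, 8, 4)
+ # >>> torch.allclose(ff(x), apply_chunking_to_forward(ff, 2, 1, x))  # chunks of 2 along dim 1
+ # True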
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->XLMRoberta
+class XLMRobertaEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([XLMRobertaLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaPooler with Roberta->XLMRoberta
+class XLMRobertaPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaPreTrainedModel with Roberta->XLMRoberta
+class XLMRobertaPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLMRobertaConfig
+ base_model_prefix = "roberta"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["XLMRobertaEmbeddings", "XLMRobertaSelfAttention"]
+
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+XLM_ROBERTA_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`XLMRobertaConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLM_ROBERTA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaModel with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class XLMRobertaModel(XLMRobertaPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` and
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
+
+ """
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->XLMRoberta
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = XLMRobertaEmbeddings(config)
+ self.encoder = XLMRobertaEncoder(config)
+
+ self.pooler = XLMRobertaPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
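+ # A minimal encoder usage sketch, assuming the public "xlm-roberta-base" checkpoint is available:
+ #
+ # >>> import torch
+ # >>> from transformers import AutoTokenizer, XLMRobertaModel
+ # >>> tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
+ # >>> model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
+ # >>> inputs = tokenizer("Hello world", return_tensors="pt")
+ # >>> with torch.no_grad():
+ # ...     outputs = model(**inputs)
+ # >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)
+ # >>> outputs.pooler_output.shape      # (batch_size, hidden_size), tanh of the first token's state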
+
+@add_start_docstrings(
+ "XLM-RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.",
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class XLMRobertaForCausalLM(XLMRobertaPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if not config.is_decoder:
+ logger.warning("If you want to use `XLMRobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
+
+ self.roberta = XLMRobertaModel(config, add_pooling_layer=False)
+ self.lm_head = XLMRobertaLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+ `[-100, 0, ..., config.vocab_size - 1]` (see the `input_ids` docstring). Tokens with indices set to `-100`
+ are ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size - 1]`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMRobertaForCausalLM, AutoConfig
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("roberta-base")
+ >>> config = AutoConfig.from_pretrained("roberta-base")
+ >>> config.is_decoder = True
+ >>> model = XLMRobertaForCausalLM.from_pretrained("roberta-base", config=config)
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ lm_loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(prediction_scores.device)
+ # we are doing next-token prediction; shift prediction scores and input ids by one
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+ labels = labels[:, 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=lm_loss,
+ logits=prediction_scores,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_shape)
+
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
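+ # An illustrative generation sketch exercising `prepare_inputs_for_generation` above: with `use_cache=True`,
+ # only the last input id is fed on each step while cached key/values cover the prefix. XLM-RoBERTa is not
+ # pretrained for generation, so the sampled text is not meaningful; this only shows the plumbing (assumes
+ # the public "xlm-roberta-base" checkpoint):
+ #
+ # >>> from transformers import AutoConfig, AutoTokenizer, XLMRobertaForCausalLM
+ # >>> config = AutoConfig.from_pretrained("xlm-roberta-base", is_decoder=True)
+ # >>> model = XLMRobertaForCausalLM.from_pretrained("xlm-roberta-base", config=config)
+ # >>> tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
+ # >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ # >>> out_ids = model.generate(**inputs, max_new_tokens=5, use_cache=True)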
+
+@add_start_docstrings(
+ """XLM-RoBERTa Model with a `language modeling` head on top.""",
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class XLMRobertaForMaskedLM(XLMRobertaPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `XLMRobertaForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.roberta = XLMRobertaModel(config, add_pooling_layer=False)
+ self.lm_head = XLMRobertaLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ mask="",
+ expected_output="' Paris'",
+ expected_loss=0.1,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size - 1]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(prediction_scores.device)
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
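A quick usage sketch for the masked-LM head above, consistent with the `expected_output` in the docstring sample; it assumes the `xlm-roberta-base` checkpoint can be downloaded from the Hub:

```python
import torch
from transformers import AutoTokenizer, XLMRobertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaForMaskedLM.from_pretrained("xlm-roberta-base")

inputs = tokenizer("The capital of France is <mask>.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Highest-scoring token at the masked position.
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))  # typically " Paris"
```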
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead
+class XLMRobertaLMHead(nn.Module):
+ """Roberta Head for masked language modeling."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ self.decoder.bias = self.bias
+
+ def forward(self, features, **kwargs):
+ x = self.dense(features)
+ x = gelu(x)
+ x = self.layer_norm(x)
+
+ # project back to size of vocabulary with bias
+ x = self.decoder(x)
+
+ return x
+
+ def _tie_weights(self):
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
+ # For accelerate compatibility and to not break backward compatibility
+ if self.decoder.bias.device.type == "meta":
+ self.decoder.bias = self.bias
+ else:
+ self.bias = self.decoder.bias
+
+
+@add_start_docstrings(
+ """
+ XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class XLMRobertaForSequenceClassification(XLMRobertaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.roberta = XLMRobertaModel(config, add_pooling_layer=False)
+ self.classifier = XLMRobertaClassificationHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="cardiffnlp/twitter-roberta-base-emotion",
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'optimism'",
+ expected_loss=0.08,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
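The `problem_type` branch above picks the loss from the label dtype and `num_labels`: integer labels with `num_labels > 1` mean single-label classification, float targets mean multi-label, and `num_labels == 1` means regression. A standalone sketch of the three branches with synthetic tensors:

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

batch, num_labels = 4, 3
logits = torch.randn(batch, num_labels)

# single_label_classification: long class indices -> cross-entropy
labels = torch.tensor([0, 2, 1, 2])
ce = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))

# multi_label_classification: float multi-hot targets -> BCE with logits
multi_hot = torch.tensor([[1.0, 0.0, 1.0]] * batch)
bce = BCEWithLogitsLoss()(logits, multi_hot)

# regression: num_labels == 1, float targets -> mean-squared error
reg_logits = torch.randn(batch, 1)
mse = MSELoss()(reg_logits.squeeze(), torch.randn(batch).squeeze())
print(ce.item(), bce.item(), mse.item())
```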
+
+@add_start_docstrings(
+ """
+ XLM-RoBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForMultipleChoice with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class XLMRobertaForMultipleChoice(XLMRobertaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.roberta = XLMRobertaModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(
+ XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ flat_inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.roberta(
+ flat_input_ids,
+ position_ids=flat_position_ids,
+ token_type_ids=flat_token_type_ids,
+ attention_mask=flat_attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(reshaped_logits.device)
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
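The multiple-choice head works by flattening `(batch, num_choices, seq_len)` inputs into `(batch * num_choices, seq_len)`, scoring each flattened row with a single linear unit, then folding the scores back so every example competes across its own choices. A shape-only sketch of that round trip:

```python
import torch

batch, num_choices, seq_len = 2, 4, 16
input_ids = torch.randint(0, 1000, (batch, num_choices, seq_len))

flat = input_ids.view(-1, input_ids.size(-1))      # (8, 16): one row per choice
scores = torch.randn(flat.size(0), 1)              # stand-in for the classifier output
reshaped = scores.view(-1, num_choices)            # (2, 4): choices per example
labels = torch.tensor([1, 3])                      # index of the correct choice
loss = torch.nn.CrossEntropyLoss()(reshaped, labels)
```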
+
+@add_start_docstrings(
+ """
+ XLM-RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class XLMRobertaForTokenClassification(XLMRobertaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.roberta = XLMRobertaModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="Jean-Baptiste/roberta-large-ner-english",
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']",
+ expected_loss=0.01,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
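A minimal usage sketch for the token-classification head; note that loading the base checkpoint with a fresh head, as done here, leaves the classifier untrained, so `num_labels=5` and the predictions are purely illustrative:

```python
import torch
from transformers import AutoTokenizer, XLMRobertaForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
# num_labels is illustrative; a freshly added head has random weights.
model = XLMRobertaForTokenClassification.from_pretrained("xlm-roberta-base", num_labels=5)

inputs = tokenizer("Hugging Face is based in New York", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits        # (1, seq_len, num_labels)
predictions = logits.argmax(-1)            # one label id per sub-word token
```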
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->XLMRoberta
+class XLMRobertaClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(self, features, **kwargs):
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
+ x = self.dropout(x)
+ x = self.dense(x)
+ x = torch.tanh(x)
+ x = self.dropout(x)
+ x = self.out_proj(x)
+ return x
+
+
+@add_start_docstrings(
+ """
+ XLM-RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLM_ROBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
+class XLMRobertaForQuestionAnswering(XLMRobertaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.roberta = XLMRobertaModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="deepset/roberta-base-squad2",
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="' puppet'",
+ expected_loss=0.86,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.roberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, split add a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
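Extractive QA decoding takes the argmax of the start and end logits independently and decodes the span between them. A sketch of that, where the checkpoint name is an assumed SQuAD-fine-tuned model rather than anything referenced in this file:

```python
import torch
from transformers import AutoTokenizer, XLMRobertaForQuestionAnswering

name = "deepset/xlm-roberta-base-squad2"  # assumed fine-tuned checkpoint
tokenizer = AutoTokenizer.from_pretrained(name)
model = XLMRobertaForQuestionAnswering.from_pretrained(name)

question, context = "Where do I live?", "My name is Tim and I live in Sweden."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```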
+
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: torch.Tensor
+ padding_idx: int
+ past_key_values_length: int
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
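To make the cast-and-multiply trick concrete, here is the computation on a hand-checkable input; the values assume RoBERTa's convention of `padding_idx = 1`:

```python
import torch

padding_idx = 1  # RoBERTa-style pad token id

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])   # last two are padding
mask = input_ids.ne(padding_idx).int()                  # [[1, 1, 1, 1, 0, 0]]
incremental = torch.cumsum(mask, dim=1).type_as(mask) * mask
position_ids = incremental.long() + padding_idx
print(position_ids)  # tensor([[2, 3, 4, 5, 1, 1]]); pad positions stay at padding_idx
```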
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta.py b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..f704d136faee5fb5b4348c15a1af7637f18ec1fe
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta.py
@@ -0,0 +1,327 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+""" Tokenization classes for XLM-RoBERTa model."""
+
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
+ "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
+ "xlm-roberta-large-finetuned-conll02-dutch": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
+ ),
+ "xlm-roberta-large-finetuned-conll02-spanish": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
+ ),
+ "xlm-roberta-large-finetuned-conll03-english": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
+ ),
+ "xlm-roberta-large-finetuned-conll03-german": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
+ ),
+ }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "xlm-roberta-base": 512,
+ "xlm-roberta-large": 512,
+ "xlm-roberta-large-finetuned-conll02-dutch": 512,
+ "xlm-roberta-large-finetuned-conll02-spanish": 512,
+ "xlm-roberta-large-finetuned-conll03-english": 512,
+ "xlm-roberta-large-finetuned-conll03-german": 512,
+}
+
+
+class XLMRobertaTokenizer(PreTrainedTokenizer):
+ """
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ # Mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(str(vocab_file))
+ self.vocab_file = vocab_file
+
+ # Original fairseq vocab and spm vocab must be "aligned":
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
+
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
+
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
+ self.fairseq_offset = 1
+
+ self.fairseq_tokens_to_ids[""] = len(self.sp_model) + self.fairseq_offset
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
+ not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ # TODO check if the t5/llama PR also applies here
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if token in self.fairseq_tokens_to_ids:
+ return self.fairseq_tokens_to_ids[token]
+ spm_id = self.sp_model.PieceToId(token)
+
+ # Need to return unknown token if the SP model returned 0
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ if index in self.fairseq_ids_to_tokens:
+ return self.fairseq_ids_to_tokens[index]
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
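The fairseq/spm alignment above is easy to verify interactively. A short sketch, assuming sentencepiece is installed and the `xlm-roberta-base` checkpoint is reachable:

```python
from transformers import XLMRobertaTokenizer  # needs sentencepiece installed

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

# Specials keep their fairseq positions 0-3 ...
print(tok.convert_tokens_to_ids(["<s>", "<pad>", "</s>", "<unk>"]))  # [0, 1, 2, 3]
# ... every SentencePiece id is shifted by fairseq_offset (= 1),
# and <mask> sits right after the shifted vocabulary.
print(tok.mask_token_id == len(tok.sp_model) + tok.fairseq_offset)   # True

# Pairs follow the <s> A </s></s> B </s> layout documented above.
print(tok.build_inputs_with_special_tokens([100], [200]))  # [0, 100, 2, 2, 200, 2]
```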
diff --git a/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..41079e29d8ca8ba3d22711c0e0666551e24ff16b
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py
@@ -0,0 +1,242 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+""" Tokenization classes for XLM-RoBERTa model."""
+
+
+import os
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import is_sentencepiece_available, logging
+
+
+if is_sentencepiece_available():
+ from .tokenization_xlm_roberta import XLMRobertaTokenizer
+else:
+ XLMRobertaTokenizer = None
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
+ "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
+ "xlm-roberta-large-finetuned-conll02-dutch": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
+ ),
+ "xlm-roberta-large-finetuned-conll02-spanish": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
+ ),
+ "xlm-roberta-large-finetuned-conll03-english": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
+ ),
+ "xlm-roberta-large-finetuned-conll03-german": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
+ ),
+ },
+ "tokenizer_file": {
+ "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/tokenizer.json",
+ "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/tokenizer.json",
+ "xlm-roberta-large-finetuned-conll02-dutch": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/tokenizer.json"
+ ),
+ "xlm-roberta-large-finetuned-conll02-spanish": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/tokenizer.json"
+ ),
+ "xlm-roberta-large-finetuned-conll03-english": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/tokenizer.json"
+ ),
+ "xlm-roberta-large-finetuned-conll03-german": (
+ "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/tokenizer.json"
+ ),
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "xlm-roberta-base": 512,
+ "xlm-roberta-large": 512,
+ "xlm-roberta-large-finetuned-conll02-dutch": 512,
+ "xlm-roberta-large-finetuned-conll02-spanish": 512,
+ "xlm-roberta-large-finetuned-conll03-english": 512,
+ "xlm-roberta-large-finetuned-conll03-german": 512,
+}
+
+
+class XLMRobertaTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" XLM-RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
+ [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
+ Additional special tokens used by the tokenizer.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = XLMRobertaTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ **kwargs,
+ ):
+ # Mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ **kwargs,
+ )
+
+ self.vocab_file = vocab_file
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
+ not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
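A short sketch of the fast tokenizer in use. The directory name is a placeholder, and `can_save_slow_tokenizer` is only `True` when the original `sentencepiece.bpe.model` is available locally, as the property above explains:

```python
import os
from transformers import XLMRobertaTokenizerFast

fast = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")

enc = fast("Hello world!", "How are you?")
print(fast.convert_ids_to_tokens(enc["input_ids"]))  # <s> ... </s></s> ... </s>

# Exporting sentencepiece.bpe.model only works when the original vocab file
# is available locally (see can_save_slow_tokenizer above).
if fast.can_save_slow_tokenizer:
    os.makedirs("xlmr-vocab", exist_ok=True)  # placeholder directory
    print(fast.save_vocabulary("xlmr-vocab"))
```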
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/commands/__init__.py b/vlmpy310/lib/python3.10/site-packages/transformers/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa5d95a85b538171ec9cf4fa16e892df1efdef6b
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/commands/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from argparse import ArgumentParser
+
+
+class BaseTransformersCLICommand(ABC):
+ @staticmethod
+ @abstractmethod
+ def register_subcommand(parser: ArgumentParser):
+ raise NotImplementedError()
+
+ @abstractmethod
+ def run(self):
+ raise NotImplementedError()
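The two abstract methods define the whole subcommand contract: `register_subcommand` wires an argparse sub-parser and binds a factory through `set_defaults(func=...)`, and `run` does the work once the factory has built the command. A hypothetical toy subcommand following that contract (names here are invented for illustration):

```python
from argparse import ArgumentParser


class HelloCommand:  # would subclass BaseTransformersCLICommand in-tree
    @staticmethod
    def register_subcommand(parser):
        sub = parser.add_parser("hello", help="Toy subcommand")
        sub.add_argument("--name", type=str, default="world")
        sub.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"hello, {self._name}")


root = ArgumentParser("demo-cli")
HelloCommand.register_subcommand(root.add_subparsers())
args = root.parse_args(["hello", "--name", "transformers"])
args.func(args).run()  # hello, transformers
```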
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/commands/convert.py b/vlmpy310/lib/python3.10/site-packages/transformers/commands/convert.py
new file mode 100644
index 0000000000000000000000000000000000000000..77df8ea1106439f1c5418c9c3c4ab2c06314c42a
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/commands/convert.py
@@ -0,0 +1,165 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from argparse import ArgumentParser, Namespace
+
+from ..utils import logging
+from . import BaseTransformersCLICommand
+
+
+def convert_command_factory(args: Namespace):
+ """
+ Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
+
+ Returns: ConvertCommand
+ """
+ return ConvertCommand(
+ args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
+ )
+
+
+IMPORT_ERROR_MESSAGE = """
+ transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it
+ requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
+"""
+
+
+class ConvertCommand(BaseTransformersCLICommand):
+ @staticmethod
+ def register_subcommand(parser: ArgumentParser):
+ """
+ Register this command to argparse so it's available for the transformers-cli
+
+ Args:
+ parser: Root parser to register command-specific arguments
+ """
+ train_parser = parser.add_parser(
+ "convert",
+ help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
+ )
+ train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
+ train_parser.add_argument(
+ "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
+ )
+ train_parser.add_argument(
+ "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
+ )
+ train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
+ train_parser.add_argument(
+ "--finetuning_task_name",
+ type=str,
+ default=None,
+ help="Optional fine-tuning task name if the TF model was a finetuned model.",
+ )
+ train_parser.set_defaults(func=convert_command_factory)
+
+ def __init__(
+ self,
+ model_type: str,
+ tf_checkpoint: str,
+ pytorch_dump_output: str,
+ config: str,
+ finetuning_task_name: str,
+ *args,
+ ):
+ self._logger = logging.get_logger("transformers-cli/converting")
+
+ self._logger.info(f"Loading model {model_type}")
+ self._model_type = model_type
+ self._tf_checkpoint = tf_checkpoint
+ self._pytorch_dump_output = pytorch_dump_output
+ self._config = config
+ self._finetuning_task_name = finetuning_task_name
+
+ def run(self):
+ if self._model_type == "albert":
+ try:
+ from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
+ convert_tf_checkpoint_to_pytorch,
+ )
+ except ImportError:
+ raise ImportError(IMPORT_ERROR_MESSAGE)
+
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+ elif self._model_type == "bert":
+ try:
+ from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
+ convert_tf_checkpoint_to_pytorch,
+ )
+ except ImportError:
+ raise ImportError(IMPORT_ERROR_MESSAGE)
+
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+ elif self._model_type == "funnel":
+ try:
+ from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
+ convert_tf_checkpoint_to_pytorch,
+ )
+ except ImportError:
+ raise ImportError(IMPORT_ERROR_MESSAGE)
+
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+ elif self._model_type == "t5":
+ try:
+ from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
+ except ImportError:
+ raise ImportError(IMPORT_ERROR_MESSAGE)
+
+ convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+ elif self._model_type == "gpt":
+ from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
+ convert_openai_checkpoint_to_pytorch,
+ )
+
+ convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+ elif self._model_type == "gpt2":
+ try:
+ from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
+ convert_gpt2_checkpoint_to_pytorch,
+ )
+ except ImportError:
+ raise ImportError(IMPORT_ERROR_MESSAGE)
+
+ convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+ elif self._model_type == "xlnet":
+ try:
+ from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
+ convert_xlnet_checkpoint_to_pytorch,
+ )
+ except ImportError:
+ raise ImportError(IMPORT_ERROR_MESSAGE)
+
+ convert_xlnet_checkpoint_to_pytorch(
+ self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
+ )
+ elif self._model_type == "xlm":
+ from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
+ convert_xlm_checkpoint_to_pytorch,
+ )
+
+ convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
+ elif self._model_type == "lxmert":
+ from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
+ convert_lxmert_checkpoint_to_pytorch,
+ )
+
+ convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
+ elif self._model_type == "rembert":
+ from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
+ convert_rembert_tf_checkpoint_to_pytorch,
+ )
+
+ convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+ else:
+ raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, t5, xlnet, xlm, lxmert]")
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/commands/env.py b/vlmpy310/lib/python3.10/site-packages/transformers/commands/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..80d8b05e04e0a3e928fbc85d8fd75f5d54b5828d
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/commands/env.py
@@ -0,0 +1,152 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib.util
+import os
+import platform
+from argparse import ArgumentParser
+
+import huggingface_hub
+
+from .. import __version__ as version
+from ..utils import (
+ is_accelerate_available,
+ is_flax_available,
+ is_safetensors_available,
+ is_tf_available,
+ is_torch_available,
+ is_torch_npu_available,
+)
+from . import BaseTransformersCLICommand
+
+
+def info_command_factory(_):
+ return EnvironmentCommand()
+
+
+def download_command_factory(args):
+ return EnvironmentCommand(args.accelerate_config_file)
+
+
+class EnvironmentCommand(BaseTransformersCLICommand):
+ @staticmethod
+ def register_subcommand(parser: ArgumentParser):
+ download_parser = parser.add_parser("env")
+ download_parser.set_defaults(func=info_command_factory)
+ download_parser.add_argument(
+ "--accelerate-config_file",
+ default=None,
+ help="The accelerate config file to use for the default values in the launching script.",
+ )
+ download_parser.set_defaults(func=download_command_factory)
+
+ def __init__(self, accelerate_config_file, *args) -> None:
+ self._accelerate_config_file = accelerate_config_file
+
+ def run(self):
+ safetensors_version = "not installed"
+ if is_safetensors_available():
+ import safetensors
+
+ safetensors_version = safetensors.__version__
+ elif importlib.util.find_spec("safetensors") is not None:
+ import safetensors
+
+ safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
+
+ accelerate_version = "not installed"
+ accelerate_config = accelerate_config_str = "not found"
+ if is_accelerate_available():
+ import accelerate
+ from accelerate.commands.config import default_config_file, load_config_from_file
+
+ accelerate_version = accelerate.__version__
+ # Get the default from the config file.
+ if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
+ accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
+
+ accelerate_config_str = (
+ "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
+ if isinstance(accelerate_config, dict)
+ else f"\t{accelerate_config}"
+ )
+
+ pt_version = "not installed"
+ pt_cuda_available = "NA"
+ if is_torch_available():
+ import torch
+
+ pt_version = torch.__version__
+ pt_cuda_available = torch.cuda.is_available()
+ pt_npu_available = is_torch_npu_available()
+
+ tf_version = "not installed"
+ tf_cuda_available = "NA"
+ if is_tf_available():
+ import tensorflow as tf
+
+ tf_version = tf.__version__
+ try:
+ # deprecated in v2.1
+ tf_cuda_available = tf.test.is_gpu_available()
+ except AttributeError:
+ # returns list of devices, convert to bool
+ tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
+
+ flax_version = "not installed"
+ jax_version = "not installed"
+ jaxlib_version = "not installed"
+ jax_backend = "NA"
+ if is_flax_available():
+ import flax
+ import jax
+ import jaxlib
+
+ flax_version = flax.__version__
+ jax_version = jax.__version__
+ jaxlib_version = jaxlib.__version__
+ jax_backend = jax.lib.xla_bridge.get_backend().platform
+
+ info = {
+ "`transformers` version": version,
+ "Platform": platform.platform(),
+ "Python version": platform.python_version(),
+ "Huggingface_hub version": huggingface_hub.__version__,
+ "Safetensors version": f"{safetensors_version}",
+ "Accelerate version": f"{accelerate_version}",
+ "Accelerate config": f"{accelerate_config_str}",
+ "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
+ "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
+ "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
+ "Jax version": f"{jax_version}",
+ "JaxLib version": f"{jaxlib_version}",
+ "Using distributed or parallel set-up in script?": "",
+ }
+ if is_torch_available():
+ if pt_cuda_available:
+ info["Using GPU in script?"] = ""
+ info["GPU type"] = torch.cuda.get_device_name()
+ elif pt_npu_available:
+ info["Using NPU in script?"] = ""
+ info["NPU type"] = torch.npu.get_device_name()
+ info["CANN version"] = torch.version.cann
+
+ print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
+ print(self.format_dict(info))
+
+ return info
+
+ @staticmethod
+ def format_dict(d):
+ return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/commands/run.py b/vlmpy310/lib/python3.10/site-packages/transformers/commands/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbf067ae4d95088a1e3a46deb02825ebe0d147d8
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/commands/run.py
@@ -0,0 +1,110 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from argparse import ArgumentParser
+
+from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
+from ..utils import logging
+from . import BaseTransformersCLICommand
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+def try_infer_format_from_ext(path: str):
+ if not path:
+ return "pipe"
+
+ for ext in PipelineDataFormat.SUPPORTED_FORMATS:
+ if path.endswith(ext):
+ return ext
+
+ raise Exception(
+ f"Unable to determine file format from file extension {path}. "
+ f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
+ )
+
+
+def run_command_factory(args):
+ nlp = pipeline(
+ task=args.task,
+ model=args.model if args.model else None,
+ config=args.config,
+ tokenizer=args.tokenizer,
+ device=args.device,
+ )
+ format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
+ reader = PipelineDataFormat.from_str(
+ format=format,
+ output_path=args.output,
+ input_path=args.input,
+ column=args.column if args.column else nlp.default_input_names,
+ overwrite=args.overwrite,
+ )
+ return RunCommand(nlp, reader)
+
+
+class RunCommand(BaseTransformersCLICommand):
+ def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
+ self._nlp = nlp
+ self._reader = reader
+
+ @staticmethod
+ def register_subcommand(parser: ArgumentParser):
+ run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
+ run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
+ run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
+ run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
+ run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
+ run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
+ run_parser.add_argument(
+ "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
+ )
+ run_parser.add_argument(
+ "--column",
+ type=str,
+ help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
+ )
+ run_parser.add_argument(
+ "--format",
+ type=str,
+ default="infer",
+ choices=PipelineDataFormat.SUPPORTED_FORMATS,
+ help="Input format to read from",
+ )
+ run_parser.add_argument(
+ "--device",
+ type=int,
+ default=-1,
+ help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
+ )
+ run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
+ run_parser.set_defaults(func=run_command_factory)
+
+ def run(self):
+ nlp, outputs = self._nlp, []
+
+ for entry in self._reader:
+ output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
+ if isinstance(output, dict):
+ outputs.append(output)
+ else:
+ outputs += output
+
+ # Saving data
+ if self._nlp.binary_output:
+ binary_path = self._reader.save_binary(outputs)
+ logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
+ else:
+ self._reader.save(outputs)
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/commands/serving.py b/vlmpy310/lib/python3.10/site-packages/transformers/commands/serving.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea4276d9b83982612f134e39799295fd039c36d0
--- /dev/null
+++ b/vlmpy310/lib/python3.10/site-packages/transformers/commands/serving.py
@@ -0,0 +1,228 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from argparse import ArgumentParser, Namespace
+from typing import Any, List, Optional
+
+from ..pipelines import Pipeline, get_supported_tasks, pipeline
+from ..utils import logging
+from . import BaseTransformersCLICommand
+
+
+try:
+ from fastapi import Body, FastAPI, HTTPException
+ from fastapi.routing import APIRoute
+ from pydantic import BaseModel
+ from starlette.responses import JSONResponse
+ from uvicorn import run
+
+ _serve_dependencies_installed = True
+except (ImportError, AttributeError):
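+    # Serving dependencies are optional at import time: fall back to inert
+    # stand-ins so this module can still be imported; ServeCommand.__init__
+    # raises a descriptive error if serving is actually requested.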
+ BaseModel = object
+
+ def Body(*x, **y):
+ pass
+
+ _serve_dependencies_installed = False
+
+
+logger = logging.get_logger("transformers-cli/serving")
+
+
+def serve_command_factory(args: Namespace):
+ """
+    Factory function used to instantiate the serving server from the provided command line arguments.
+
+ Returns: ServeCommand
+ """
+ nlp = pipeline(
+ task=args.task,
+ model=args.model if args.model else None,
+ config=args.config,
+ tokenizer=args.tokenizer,
+ device=args.device,
+ )
+ return ServeCommand(nlp, args.host, args.port, args.workers)
+
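+# Example invocation wired up by the factory above (hypothetical values):
+#   transformers-cli serve --task text-classification --host 0.0.0.0 --port 8888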
+
+class ServeModelInfoResult(BaseModel):
+ """
+ Expose model information
+ """
+
+ infos: dict
+
+
+class ServeTokenizeResult(BaseModel):
+ """
+ Tokenize result model
+ """
+
+ tokens: List[str]
+ tokens_ids: Optional[List[int]]
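+    # NOTE: under pydantic v1 an Optional field implicitly defaults to None;
+    # pydantic v2 would require an explicit "= None" here.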
+
+
+class ServeDeTokenizeResult(BaseModel):
+ """
+ DeTokenize result model
+ """
+
+ text: str
+
+
+class ServeForwardResult(BaseModel):
+ """
+ Forward result model
+ """
+
+ output: Any
+
+
+class ServeCommand(BaseTransformersCLICommand):
+ @staticmethod
+ def register_subcommand(parser: ArgumentParser):
+ """
+        Register this command to argparse so it's available for transformers-cli
+
+ Args:
+ parser: Root parser to register command-specific arguments
+ """
+ serve_parser = parser.add_parser(
+ "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
+ )
+ serve_parser.add_argument(
+ "--task",
+ type=str,
+ choices=get_supported_tasks(),
+ help="The task to run the pipeline on",
+ )
+ serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
+ serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
+ serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
+ serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
+ serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
+ serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
+ serve_parser.add_argument(
+ "--device",
+ type=int,
+ default=-1,
+ help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
+ )
+ serve_parser.set_defaults(func=serve_command_factory)
+
+ def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
+ self._pipeline = pipeline
+
+ self.host = host
+ self.port = port
+ self.workers = workers
+
+ if not _serve_dependencies_installed:
+ raise RuntimeError(
+ "Using serve command requires FastAPI and uvicorn. "
+ 'Please install transformers with [serving]: pip install "transformers[serving]". '
+ "Or install FastAPI and uvicorn separately."
+ )
+ else:
+ logger.info(f"Serving model over {host}:{port}")
+ self._app = FastAPI(
+ routes=[
+ APIRoute(
+ "/",
+ self.model_info,
+ response_model=ServeModelInfoResult,
+ response_class=JSONResponse,
+ methods=["GET"],
+ ),
+ APIRoute(
+ "/tokenize",
+ self.tokenize,
+ response_model=ServeTokenizeResult,
+ response_class=JSONResponse,
+ methods=["POST"],
+ ),
+ APIRoute(
+ "/detokenize",
+ self.detokenize,
+ response_model=ServeDeTokenizeResult,
+ response_class=JSONResponse,
+ methods=["POST"],
+ ),
+ APIRoute(
+ "/forward",
+ self.forward,
+ response_model=ServeForwardResult,
+ response_class=JSONResponse,
+ methods=["POST"],
+ ),
+ ],
+ timeout=600,
+ )
+
+ def run(self):
+ run(self._app, host=self.host, port=self.port, workers=self.workers)
+
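+    # The handler below serves GET /; a hypothetical request using the CLI
+    # defaults:
+    #   curl http://localhost:8888/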
+ def model_info(self):
+ return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
+
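+    # A hypothetical request reaching the handler below (payload values are
+    # illustrative):
+    #   curl -X POST http://localhost:8888/tokenize \
+    #     -H "Content-Type: application/json" \
+    #     -d '{"text_input": "Hello world", "return_ids": true}'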
+ def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
+ """
+ Tokenize the provided input and eventually returns corresponding tokens id: - **text_input**: String to
+ tokenize - **return_ids**: Boolean flags indicating if the tokens have to be converted to their integer
+ mapping.
+ """
+ try:
+ tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
+
+ if return_ids:
+ tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
+ return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
+ else:
+ return ServeTokenizeResult(tokens=tokens_txt)
+
+ except Exception as e:
+ raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
+
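+    # A hypothetical request reaching the handler below (token ids are
+    # illustrative and tokenizer-dependent):
+    #   curl -X POST http://localhost:8888/detokenize \
+    #     -H "Content-Type: application/json" \
+    #     -d '{"tokens_ids": [101, 7592, 102], "skip_special_tokens": true}'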
+ def detokenize(
+ self,
+ tokens_ids: List[int] = Body(None, embed=True),
+ skip_special_tokens: bool = Body(False, embed=True),
+ cleanup_tokenization_spaces: bool = Body(True, embed=True),
+ ):
+ """
+ Detokenize the provided tokens ids to readable text: - **tokens_ids**: List of tokens ids -
+ **skip_special_tokens**: Flag indicating to not try to decode special tokens - **cleanup_tokenization_spaces**:
+ Flag indicating to remove all leading/trailing spaces and intermediate ones.
+ """
+ try:
+ decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
+            return ServeDeTokenizeResult(text=decoded_str)
+ except Exception as e:
+ raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
+
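+    # A hypothetical request reaching the handler below:
+    #   curl -X POST http://localhost:8888/forward \
+    #     -H "Content-Type: application/json" \
+    #     -d '{"inputs": "Hello world"}'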
+ async def forward(self, inputs=Body(None, embed=True)):
+ """
+ **inputs**: **attention_mask**: **tokens_type_ids**:
+ """
+
+        # Check we don't have an empty input
+        if len(inputs) == 0:
+            return ServeForwardResult(output=[])
+
+ try:
+ # Forward through the model
+ output = self._pipeline(inputs)
+ return ServeForwardResult(output=output)
+ except Exception as e:
+ raise HTTPException(500, {"error": str(e)})
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/sagemaker/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/sagemaker/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32f2d4e6781bc0174c68a953f10cf163553fd29f
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/sagemaker/__pycache__/__init__.cpython-310.pyc differ
diff --git a/vlmpy310/lib/python3.10/site-packages/transformers/sagemaker/__pycache__/trainer_sm.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/transformers/sagemaker/__pycache__/trainer_sm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e193e532f407b2526ba179727881ffe83a19b994
Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/transformers/sagemaker/__pycache__/trainer_sm.cpython-310.pyc differ