Column schema (dtype and observed min/max; for string columns the min/max refer to string length):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string | 7 | 58 |
| file_path | string | 9 | 302 |
| class_name | string | 5 | 256 |
| human_written_code | string | 16 | 2.16M |
| class_skeleton | string (nullable) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
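Each row below pairs the full class source (human_written_code) with a signature-only class_skeleton and the per-class metric columns listed above. A minimal sketch of how a dataset with this schema could be loaded and filtered with the Hugging Face `datasets` library; the dataset identifier is hypothetical, and only the column names above are assumed:

```python
from datasets import load_dataset

# Hypothetical dataset path; the real identifier is not given in this dump.
ds = load_dataset("org/class-level-code-metrics", split="train")

# Keep only reasonably small, documented classes using the metric columns above.
small_documented = ds.filter(
    lambda row: row["CountLineCode"] <= 200 and row["total_doc_str"] > 0
)

# Peek at a few rows (assumes the filtered split has at least three rows).
for row in small_documented.select(range(3)):
    print(row["class_name"], row["MaxCyclomatic"])
```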
id: 600 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/processing_aria.py | class_name: transformers.models.aria.processing_aria.AriaProcessor
human_written_code:
from typing import Optional, Union
from ...image_processing_utils import BatchFeature
from ..auto import AutoTokenizer
from ...tokenization_utils import PreTokenizedInput, TextInput
import numpy as np
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...image_utils import ImageInput
class AriaProcessor(ProcessorMixin):
"""
AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the Llama slow tokenizer.
Args:
image_processor (`AriaImageProcessor`, *optional*):
The AriaImageProcessor to use for image preprocessing.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
size_conversion (`Dict`, *optional*):
A dictionary indicating size conversions for images.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AriaImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor=None, tokenizer: Union[AutoTokenizer, str]=None, chat_template: Optional[str]=None, size_conversion: Optional[dict[Union[float, int], int]]=None):
if size_conversion is None:
size_conversion = {490: 128, 980: 256}
self.size_conversion = {int(k): v for k, v in size_conversion.items()}
self.image_token = tokenizer.image_token
self.image_token_id = tokenizer.image_token_id
if tokenizer is not None and tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.unk_token
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], images: Optional[ImageInput]=None, audio=None, videos=None, **kwargs: Unpack[AriaProcessorKwargs]) -> BatchFeature:
"""
Main method to prepare for the model one or several sequence(s) and image(s).
Args:
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(AriaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and (not isinstance(text[0], str)):
raise TypeError('Invalid input text. Please provide a string, or a list of strings')
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs['images_kwargs'])
tokens_per_image = self.size_conversion[image_inputs.pixel_values.shape[2]]
prompt_strings = []
num_crops = image_inputs.pop('num_crops') * tokens_per_image
for sample in text:
sample = sample.replace(self.tokenizer.image_token, self.tokenizer.image_token * num_crops)
prompt_strings.append(sample)
else:
image_inputs = {}
prompt_strings = text
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'], return_tensors=None)
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image'])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs['input_ids'])
mm_token_type_ids = np.zeros_like(text_inputs['input_ids'])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = AriaProcessorKwargs._defaults.get('images_kwargs', {})
images_kwargs.update(kwargs)
max_size = images_kwargs.get('max_image_size', None) or self.image_processor.max_image_size
num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]
num_image_tokens = [self.size_conversion[max_size] * num_patches for num_patches in num_image_patches]
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
image_processor_input_names = [name for name in image_processor_input_names if name != 'num_crops']
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
class_skeleton:
class AriaProcessor(ProcessorMixin):
'''
AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the Llama slow tokenizer.
Args:
image_processor (`AriaImageProcessor`, *optional*):
The AriaImageProcessor to use for image preprocessing.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
size_conversion (`Dict`, *optional*):
A dictionary indicating size conversions for images.
'''
def __init__(self, image_processor=None, tokenizer: Union[AutoTokenizer, str]=None, chat_template: Optional[str]=None, size_conversion: Optional[dict[Union[float, int], int]]=None):
pass
def __call__(self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], images: Optional[ImageInput]=None, audio=None, videos=None, **kwargs: Unpack[AriaProcessorKwargs]) -> BatchFeature:
'''
Main method to prepare for the model one or several sequence(s) and image(s).
Args:
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`.
'''
pass
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
'''
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
'''
pass
@property
def model_input_names(self):
pass
metrics: total_program_units=6, total_doc_str=3, AvgCountLine=19, AvgCountLineBlank=2, AvgCountLineCode=11, AvgCountLineComment=6, AvgCyclomatic=2, CommentToCodeRatio=0.63, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=1, CountDeclMethod=5, CountDeclMethodAll=22, CountLine=118, CountLineBlank=15, CountLineCode=63, CountLineCodeDecl=34, CountLineCodeExe=43, CountLineComment=40, CountStmt=37, CountStmtDecl=20, CountStmtExe=31, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=11
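Row 600's AriaProcessor.__call__ expands each image placeholder in the prompt so that the text stream reserves one token per visual embedding (num_crops multiplied by the size_conversion entry for the chosen resolution). A small self-contained sketch of that expansion; the placeholder string and the counts are illustrative, not taken from an Aria checkpoint:

```python
# Minimal sketch of the placeholder expansion done in AriaProcessor.__call__.
image_token = "<|img|>"                    # assumed placeholder string, illustration only
size_conversion = {490: 128, 980: 256}     # default table from __init__

num_crops = 2                              # e.g. image_inputs.pop("num_crops")
tokens_per_image = size_conversion[980]    # keyed on pixel_values.shape[2]

prompt = f"Describe this image: {image_token}"
expanded = prompt.replace(image_token, image_token * (num_crops * tokens_per_image))

print(expanded.count(image_token))  # 512 placeholder tokens reserved for the model
```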
id: 601 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/aria/processing_aria.py | class_name: transformers.models.aria.processing_aria.AriaProcessorKwargs
human_written_code:
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...utils import TensorType
class AriaProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False}, 'images_kwargs': {'max_image_size': 980, 'split_image': False}, 'return_tensors': TensorType.PYTORCH}
class_skeleton:
class AriaProcessorKwargs(ProcessingKwargs, total=False):
pass
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=11, CountLineBlank=0, CountLineCode=11, CountLineCodeDecl=2, CountLineCodeExe=10, CountLineComment=0, CountStmt=2, CountStmtDecl=2, CountStmtExe=1, MaxCyclomatic=0, MaxInheritanceTree=3, MaxNesting=0, SumCyclomatic=0
id: 602 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.configuration_audio_spectrogram_transformer.ASTConfig
human_written_code:
from ...configuration_utils import PretrainedConfig
from typing import Any
class ASTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the AST
[MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
frequency_stride (`int`, *optional*, defaults to 10):
Frequency stride to use when patchifying the spectrograms.
time_stride (`int`, *optional*, defaults to 10):
Temporal stride to use when patchifying the spectrograms.
max_length (`int`, *optional*, defaults to 1024):
Temporal dimension of the spectrograms.
num_mel_bins (`int`, *optional*, defaults to 128):
Frequency dimension of the spectrograms (number of Mel-frequency bins).
Example:
```python
>>> from transformers import ASTConfig, ASTModel
>>> # Initializing an AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
>>> configuration = ASTConfig()
>>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
>>> model = ASTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'audio-spectrogram-transformer'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.patch_size = patch_size
self.qkv_bias = qkv_bias
self.frequency_stride = frequency_stride
self.time_stride = time_stride
self.max_length = max_length
self.num_mel_bins = num_mel_bins
def _get_non_default_generation_parameters(self) -> dict[str, Any]:
return {}
class_skeleton:
class ASTConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the AST
[MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
frequency_stride (`int`, *optional*, defaults to 10):
Frequency stride to use when patchifying the spectrograms.
time_stride (`int`, *optional*, defaults to 10):
Temporal stride to use when patchifying the spectrograms.
max_length (`int`, *optional*, defaults to 1024):
Temporal dimension of the spectrograms.
num_mel_bins (`int`, *optional*, defaults to 128):
Frequency dimension of the spectrograms (number of Mel-frequency bins).
Example:
```python
>>> from transformers import ASTConfig, ASTModel
>>> # Initializing an AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
>>> configuration = ASTConfig()
>>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
>>> model = ASTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
pass
def _get_non_default_generation_parameters(self) -> dict[str, Any]:
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=19, AvgCountLineBlank=1, AvgCountLineCode=19, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=1.36, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=15, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=103, CountLineBlank=11, CountLineCode=39, CountLineCodeDecl=37, CountLineCodeExe=18, CountLineComment=53, CountStmt=21, CountStmtDecl=19, CountStmtExe=18, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 603 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer.ASTFeatureExtractor
human_written_code:
import numpy as np
from ...utils import TensorType, is_speech_available, is_torch_available, logging
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_utils import BatchFeature
from typing import Optional, Union
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
class ASTFeatureExtractor(SequenceFeatureExtractor):
"""
Constructs an Audio Spectrogram Transformer (AST) feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
otherwise, pads/truncates them to a fixed length and normalizes them using a mean and standard deviation.
Args:
feature_size (`int`, *optional*, defaults to 1):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
num_mel_bins (`int`, *optional*, defaults to 128):
Number of Mel-frequency bins.
max_length (`int`, *optional*, defaults to 1024):
Maximum length to which to pad/truncate the extracted features.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the log-Mel features using `mean` and `std`.
mean (`float`, *optional*, defaults to -4.2677393):
The mean value used to normalize the log-Mel features. Uses the AudioSet mean by default.
std (`float`, *optional*, defaults to 4.5689974):
The standard deviation value used to normalize the log-Mel features. Uses the AudioSet standard deviation
by default.
return_attention_mask (`bool`, *optional*, defaults to `False`):
Whether or not [`~ASTFeatureExtractor.__call__`] should return `attention_mask`.
"""
model_input_names = ['input_values', 'attention_mask']
def __init__(self, feature_size=1, sampling_rate=16000, num_mel_bins=128, max_length=1024, padding_value=0.0, do_normalize=True, mean=-4.2677393, std=4.5689974, return_attention_mask=False, **kwargs):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.num_mel_bins = num_mel_bins
self.max_length = max_length
self.do_normalize = do_normalize
self.mean = mean
self.std = std
self.return_attention_mask = return_attention_mask
if not is_speech_available():
mel_filters = mel_filter_bank(num_frequency_bins=257, num_mel_filters=self.num_mel_bins, min_frequency=20, max_frequency=sampling_rate // 2, sampling_rate=sampling_rate, norm=None, mel_scale='kaldi', triangularize_in_mel_space=True)
self.mel_filters = mel_filters
self.window = window_function(400, 'hann', periodic=False)
def _extract_fbank_features(self, waveform: np.ndarray, max_length: int) -> np.ndarray:
"""
Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs
and hence the waveform should not be normalized before feature extraction.
"""
if is_speech_available():
waveform = torch.from_numpy(waveform).unsqueeze(0)
fbank = ta_kaldi.fbank(waveform, sample_frequency=self.sampling_rate, window_type='hanning', num_mel_bins=self.num_mel_bins)
else:
waveform = np.squeeze(waveform)
fbank = spectrogram(waveform, self.window, frame_length=400, hop_length=160, fft_length=512, power=2.0, center=False, preemphasis=0.97, mel_filters=self.mel_filters, log_mel='log', mel_floor=1.192092955078125e-07, remove_dc_offset=True).T
fbank = torch.from_numpy(fbank)
n_frames = fbank.shape[0]
difference = max_length - n_frames
if difference > 0:
pad_module = torch.nn.ZeroPad2d((0, 0, 0, difference))
fbank = pad_module(fbank)
elif difference < 0:
fbank = fbank[0:max_length, :]
fbank = fbank.numpy()
return fbank
def normalize(self, input_values: np.ndarray) -> np.ndarray:
return (input_values - self.mean) / (self.std * 2)
def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
stereo, i.e. single float per timestep.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. Failing to do so can result in silent errors that might be hard to debug.')
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}')
is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list)))
if is_batched:
raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
elif not is_batched and (not isinstance(raw_speech, np.ndarray)):
raw_speech = np.asarray(raw_speech, dtype=np.float32)
elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float32)
if not is_batched:
raw_speech = [raw_speech]
features = [self._extract_fbank_features(waveform, max_length=self.max_length) for waveform in raw_speech]
padded_inputs = BatchFeature({'input_values': features})
input_values = padded_inputs.get('input_values')
if isinstance(input_values[0], list):
padded_inputs['input_values'] = [np.asarray(feature, dtype=np.float32) for feature in input_values]
if self.do_normalize:
padded_inputs['input_values'] = [self.normalize(feature) for feature in input_values]
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
class_skeleton:
class ASTFeatureExtractor(SequenceFeatureExtractor):
'''
Constructs an Audio Spectrogram Transformer (AST) feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
otherwise, pads/truncates them to a fixed length and normalizes them using a mean and standard deviation.
Args:
feature_size (`int`, *optional*, defaults to 1):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
num_mel_bins (`int`, *optional*, defaults to 128):
Number of Mel-frequency bins.
max_length (`int`, *optional*, defaults to 1024):
Maximum length to which to pad/truncate the extracted features.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the log-Mel features using `mean` and `std`.
mean (`float`, *optional*, defaults to -4.2677393):
The mean value used to normalize the log-Mel features. Uses the AudioSet mean by default.
std (`float`, *optional*, defaults to 4.5689974):
The standard deviation value used to normalize the log-Mel features. Uses the AudioSet standard deviation
by default.
return_attention_mask (`bool`, *optional*, defaults to `False`):
Whether or not [`~ASTFeatureExtractor.__call__`] should return `attention_mask`.
'''
def __init__(self, feature_size=1, sampling_rate=16000, num_mel_bins=128, max_length=1024, padding_value=0.0, do_normalize=True, mean=-4.2677393, std=4.5689974, return_attention_mask=False, **kwargs):
pass
def _extract_fbank_features(self, waveform: np.ndarray, max_length: int) -> np.ndarray:
'''
Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs
and hence the waveform should not be normalized before feature extraction.
'''
pass
def normalize(self, input_values: np.ndarray) -> np.ndarray:
pass
def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], sampling_rate: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature:
'''
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
stereo, i.e. single float per timestep.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
'''
pass
metrics: total_program_units=5, total_doc_str=3, AvgCountLine=41, AvgCountLineBlank=5, AvgCountLineCode=29, AvgCountLineComment=7, AvgCyclomatic=5, CommentToCodeRatio=0.44, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=9, CountDeclMethod=4, CountDeclMethodAll=21, CountLine=198, CountLineBlank=27, CountLineCode=119, CountLineCodeDecl=47, CountLineCodeExe=92, CountLineComment=52, CountStmt=56, CountStmtDecl=24, CountStmtExe=51, MaxCyclomatic=11, MaxInheritanceTree=3, MaxNesting=2, SumCyclomatic=18
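The _extract_fbank_features method in row 603 pads every spectrogram with zero frames (or truncates it) so the time axis always has exactly max_length frames. A minimal NumPy-only sketch of that step, with illustrative shapes:

```python
import numpy as np

# Sketch of the pad/truncate step in ASTFeatureExtractor._extract_fbank_features,
# using NumPy instead of torch.nn.ZeroPad2d.
def pad_or_truncate(fbank: np.ndarray, max_length: int) -> np.ndarray:
    n_frames = fbank.shape[0]
    difference = max_length - n_frames
    if difference > 0:
        # pad with zero frames at the end, as ZeroPad2d((0, 0, 0, difference)) does
        return np.pad(fbank, ((0, difference), (0, 0)))
    return fbank[:max_length, :]

fbank = np.random.randn(700, 128).astype(np.float32)   # (frames, num_mel_bins)
print(pad_or_truncate(fbank, 1024).shape)               # (1024, 128)
```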
id: 604 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTAttention
human_written_code:
from typing import Callable, Optional, Union
import torch
from torch import nn
from .configuration_audio_spectrogram_transformer import ASTConfig
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
class ASTAttention(nn.Module):
def __init__(self, config: ASTConfig):
super().__init__()
self.attention = ASTSelfAttention(config)
self.output = ASTSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads: set[int]):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
self_attn_output, _ = self.attention(hidden_states, head_mask)
output = self.output(self_attn_output, hidden_states)
return output
class_skeleton:
class ASTAttention(nn.Module):
def __init__(self, config: ASTConfig):
pass
def prune_heads(self, heads: set[int]):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=9, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=3, CountClassDerived=1, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=37, CountLineBlank=6, CountLineCode=29, CountLineCodeDecl=16, CountLineCodeExe=20, CountLineComment=3, CountStmt=22, CountStmtDecl=11, CountStmtExe=18, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
id: 605 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTEmbeddings
human_written_code:
from .configuration_audio_spectrogram_transformer import ASTConfig
import torch
from torch import nn
class ASTEmbeddings(nn.Module):
"""
Construct the CLS token, position and patch embeddings.
"""
def __init__(self, config: ASTConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.patch_embeddings = ASTPatchEmbeddings(config)
frequency_out_dimension, time_out_dimension = self.get_shape(config)
num_patches = frequency_out_dimension * time_out_dimension
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def get_shape(self, config):
frequency_out_dimension = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
time_out_dimension = (config.max_length - config.patch_size) // config.time_stride + 1
return (frequency_out_dimension, time_out_dimension)
def forward(self, input_values: torch.Tensor) -> torch.Tensor:
batch_size = input_values.shape[0]
embeddings = self.patch_embeddings(input_values)
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
distillation_tokens = self.distillation_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1)
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
class_skeleton:
class ASTEmbeddings(nn.Module):
'''
Construct the CLS token, position and patch embeddings.
'''
def __init__(self, config: ASTConfig) -> None:
pass
def get_shape(self, config):
pass
def forward(self, input_values: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=10, AvgCountLineBlank=2, AvgCountLineCode=8, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.21, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=6, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=37, CountLineBlank=8, CountLineCode=24, CountLineCodeDecl=18, CountLineCodeExe=20, CountLineComment=5, CountStmt=24, CountStmtDecl=18, CountStmtExe=20, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
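Row 605's get_shape determines how many patches the Conv2d projection produces, and forward prepends a CLS token and a distillation token before adding position embeddings. Working the formula through with the default ASTConfig values from row 602 gives the expected sequence length:

```python
# Sketch of ASTEmbeddings.get_shape with the ASTConfig defaults:
# num_mel_bins=128, max_length=1024, patch_size=16, frequency_stride=10, time_stride=10.
num_mel_bins, max_length, patch_size = 128, 1024, 16
frequency_stride, time_stride = 10, 10

frequency_out_dimension = (num_mel_bins - patch_size) // frequency_stride + 1   # 12
time_out_dimension = (max_length - patch_size) // time_stride + 1               # 101
num_patches = frequency_out_dimension * time_out_dimension                      # 1212

# +2 accounts for the CLS and distillation tokens prepended in forward()
print(num_patches + 2)  # 1214 positions in position_embeddings
```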
id: 606 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTEncoder
human_written_code:
from .configuration_audio_spectrogram_transformer import ASTConfig
import torch
from typing import Callable, Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, SequenceClassifierOutput
class ASTEncoder(nn.Module):
def __init__(self, config: ASTConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([ASTLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> BaseModelOutput:
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, layer_head_mask)
return BaseModelOutput(last_hidden_state=hidden_states)
class_skeleton:
class ASTEncoder(nn.Module):
def __init__(self, config: ASTConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> BaseModelOutput:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=24, AvgCountLineBlank=4, AvgCountLineCode=20, AvgCountLineComment=0, AvgCyclomatic=6, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=49, CountLineBlank=8, CountLineCode=41, CountLineCodeDecl=18, CountLineCodeExe=31, CountLineComment=0, CountStmt=24, CountStmtDecl=11, CountStmtExe=21, MaxCyclomatic=10, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=11
id: 607 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTForAudioClassification
human_written_code:
from ...processing_utils import Unpack
from .configuration_audio_spectrogram_transformer import ASTConfig
import torch
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, SequenceClassifierOutput
from ...utils import TransformersKwargs, auto_docstring, logging
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n Audio Spectrogram Transformer model with an audio classification head on top (a linear layer on top of the pooled\n output) e.g. for datasets like AudioSet, Speech Commands v2.\n ')
class ASTForAudioClassification(ASTPreTrainedModel):
def __init__(self, config: ASTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.audio_spectrogram_transformer = ASTModel(config)
self.classifier = ASTMLPHead(config)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> SequenceClassifierOutput:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):
Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via
the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the
mel features, padding and conversion into a tensor of type `torch.FloatTensor`.
See [`~ASTFeatureExtractor.__call__`]
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the audio classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.audio_spectrogram_transformer(input_values, head_mask=head_mask, **kwargs)
pooled_output = outputs.pooler_output
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring(custom_intro='\n Audio Spectrogram Transformer model with an audio classification head on top (a linear layer on top of the pooled\n output) e.g. for datasets like AudioSet, Speech Commands v2.\n ')
class ASTForAudioClassification(ASTPreTrainedModel):
def __init__(self, config: ASTConfig) -> None:
pass
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> SequenceClassifierOutput:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):
Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via
the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the
mel features, padding and conversion into a tensor of type `torch.FloatTensor`.
See [`~ASTFeatureExtractor.__call__`]
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the audio classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
metrics: total_program_units=6, total_doc_str=1, AvgCountLine=36, AvgCountLineBlank=5, AvgCountLineCode=28, AvgCountLineComment=4, AvgCyclomatic=7, CommentToCodeRatio=0.12, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=83, CountLineBlank=10, CountLineCode=65, CountLineCodeDecl=21, CountLineCodeExe=45, CountLineComment=8, CountStmt=32, CountStmtDecl=12, CountStmtExe=29, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=13
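Row 607 wires ASTModel's pooled output into the ASTMLPHead classifier. A small sketch of the end-to-end call shape, assuming transformers and torch are installed; the model is randomly initialized here, so only the shapes are meaningful:

```python
import torch
from transformers import ASTConfig, ASTForAudioClassification

config = ASTConfig(num_labels=2)
model = ASTForAudioClassification(config).eval()

# input_values: (batch_size, max_length, num_mel_bins), as described in the docstring
input_values = torch.randn(1, config.max_length, config.num_mel_bins)
with torch.no_grad():
    logits = model(input_values).logits
print(logits.shape)  # torch.Size([1, 2])
```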
id: 608 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTIntermediate
human_written_code:
import torch
from torch import nn
from ...activations import ACT2FN
from .configuration_audio_spectrogram_transformer import ASTConfig
class ASTIntermediate(nn.Module):
def __init__(self, config: ASTConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class_skeleton:
class ASTIntermediate(nn.Module):
def __init__(self, config: ASTConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=14, CountLineBlank=2, CountLineCode=12, CountLineCodeDecl=5, CountLineCodeExe=9, CountLineComment=0, CountStmt=11, CountStmtDecl=5, CountStmtExe=8, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
id: 609 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTLayer
human_written_code:
from typing import Callable, Optional, Union
import torch
from torch import nn
from .configuration_audio_spectrogram_transformer import ASTConfig
from ...modeling_layers import GradientCheckpointingLayer
class ASTLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: ASTConfig):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = ASTAttention(config)
self.intermediate = ASTIntermediate(config)
self.output = ASTOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
hidden_states_norm = self.layernorm_before(hidden_states)
attention_output = self.attention(hidden_states_norm, head_mask)
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output, hidden_states)
return layer_output
class_skeleton:
class ASTLayer(GradientCheckpointingLayer):
'''This corresponds to the Block class in the timm implementation.'''
def __init__(self, config: ASTConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=18, AvgCountLineBlank=3, AvgCountLineCode=14, AvgCountLineComment=3, AvgCyclomatic=1, CommentToCodeRatio=0.21, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=7, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=40, CountLineBlank=7, CountLineCode=29, CountLineCodeDecl=19, CountLineCodeExe=21, CountLineComment=6, CountStmt=20, CountStmtDecl=14, CountStmtExe=17, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 610 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTMLPHead
human_written_code:
from torch import nn
from .configuration_audio_spectrogram_transformer import ASTConfig
class ASTMLPHead(nn.Module):
def __init__(self, config: ASTConfig):
super().__init__()
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
def forward(self, hidden_state):
hidden_state = self.layernorm(hidden_state)
hidden_state = self.dense(hidden_state)
return hidden_state
class_skeleton:
class ASTMLPHead(nn.Module):
def __init__(self, config: ASTConfig):
pass
def forward(self, hidden_state):
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=4, AvgCountLineBlank=0, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=10, CountLineBlank=1, CountLineCode=9, CountLineCodeDecl=5, CountLineCodeExe=6, CountLineComment=0, CountStmt=9, CountStmtDecl=5, CountStmtExe=6, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
id: 611 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTModel
human_written_code:
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, SequenceClassifierOutput
from .configuration_audio_spectrogram_transformer import ASTConfig
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, logging
from ...processing_utils import Unpack
from ...utils.generic import can_return_tuple, check_model_inputs
from typing import Callable, Optional, Union
@auto_docstring
class ASTModel(ASTPreTrainedModel):
def __init__(self, config: ASTConfig) -> None:
super().__init__(config)
self.config = config
self.embeddings = ASTEmbeddings(config)
self.encoder = ASTEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def get_input_embeddings(self) -> ASTPatchEmbeddings:
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPooling:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):
Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a
`torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library
(`pip install soundfile`).
To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the
mel features, padding and conversion into a tensor of type `torch.FloatTensor`.
See [`~ASTFeatureExtractor.__call__`]
"""
if input_values is None:
raise ValueError('You have to specify input_values')
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(input_values)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, head_mask=head_mask)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
class_skeleton:
@auto_docstring
class ASTModel(ASTPreTrainedModel):
def __init__(self, config: ASTConfig) -> None:
pass
def get_input_embeddings(self) -> ASTPatchEmbeddings:
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPooling:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):
Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a
`torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library
(`pip install soundfile`).
To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the
mel features, padding and conversion into a tensor of type `torch.FloatTensor`.
See [`~ASTFeatureExtractor.__call__`]
'''
pass
metrics: total_program_units=8, total_doc_str=2, AvgCountLine=17, AvgCountLineBlank=3, AvgCountLineCode=12, AvgCountLineComment=3, AvgCyclomatic=3, CommentToCodeRatio=0.18, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=5, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=4, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=79, CountLineBlank=13, CountLineCode=56, CountLineCodeDecl=22, CountLineCodeExe=36, CountLineComment=10, CountStmt=28, CountStmtDecl=14, CountStmtExe=23, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=10
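After the final layer norm, row 611's ASTModel uses no learned pooler; it simply averages the hidden states of the two special tokens prepended by ASTEmbeddings. A short sketch of that pooling step with illustrative shapes:

```python
import torch

# Sketch of ASTModel's pooling: the pooled output is the average of the
# CLS token (index 0) and the distillation token (index 1) hidden states.
batch_size, seq_len, hidden_size = 2, 1214, 768   # illustrative shapes
sequence_output = torch.randn(batch_size, seq_len, hidden_size)

pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2
print(pooled_output.shape)  # torch.Size([2, 768])
```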
id: 612 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTOutput
human_written_code:
from .configuration_audio_spectrogram_transformer import ASTConfig
from torch import nn
import torch
class ASTOutput(nn.Module):
def __init__(self, config: ASTConfig):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
class_skeleton:
class ASTOutput(nn.Module):
def __init__(self, config: ASTConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=3, CountLineCode=10, CountLineCodeDecl=5, CountLineCodeExe=7, CountLineComment=0, CountStmt=10, CountStmtDecl=5, CountStmtExe=7, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 613 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTPatchEmbeddings
human_written_code:
from .configuration_audio_spectrogram_transformer import ASTConfig
import torch
from torch import nn
class ASTPatchEmbeddings(nn.Module):
"""
This class turns `input_values` into the initial `hidden_states` (patch embeddings) of shape `(batch_size,
seq_length, hidden_size)` to be consumed by a Transformer.
"""
def __init__(self, config: ASTConfig):
super().__init__()
patch_size = config.patch_size
frequency_stride = config.frequency_stride
time_stride = config.time_stride
self.projection = nn.Conv2d(1, config.hidden_size, kernel_size=(patch_size, patch_size), stride=(frequency_stride, time_stride))
def forward(self, input_values: torch.Tensor) -> torch.Tensor:
input_values = input_values.unsqueeze(1)
input_values = input_values.transpose(2, 3)
embeddings = self.projection(input_values).flatten(2).transpose(1, 2)
return embeddings
class_skeleton:
class ASTPatchEmbeddings(nn.Module):
'''
This class turns `input_values` into the initial `hidden_states` (patch embeddings) of shape `(batch_size,
seq_length, hidden_size)` to be consumed by a Transformer.
'''
def __init__(self, config: ASTConfig):
pass
def forward(self, input_values: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=8, AvgCountLineBlank=1, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.29, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=22, CountLineBlank=4, CountLineCode=14, CountLineCodeDecl=8, CountLineCodeExe=11, CountLineComment=4, CountStmt=12, CountStmtDecl=8, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 614 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTPreTrainedModel
human_written_code:
from typing import Callable, Optional, Union
from .configuration_audio_spectrogram_transformer import ASTConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, logging
import torch
from torch import nn
@auto_docstring
class ASTPreTrainedModel(PreTrainedModel):
config: ASTConfig
base_model_prefix = 'audio_spectrogram_transformer'
main_input_name = 'input_values'
supports_gradient_checkpointing = True
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': ASTLayer, 'attentions': ASTSelfAttention}
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, ASTEmbeddings):
module.cls_token.data.zero_()
module.position_embeddings.data.zero_()
module.distillation_token.data.zero_()
class_skeleton:
@auto_docstring
class ASTPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=13, AvgCountLineBlank=0, AvgCountLineCode=10, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.5, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=2, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=26, CountLineBlank=2, CountLineCode=16, CountLineCodeDecl=7, CountLineCodeExe=14, CountLineComment=8, CountStmt=13, CountStmtDecl=7, CountStmtExe=11, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=4
id: 615 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTSelfAttention
human_written_code:
from .configuration_audio_spectrogram_transformer import ASTConfig
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from typing import Callable, Optional, Union
class ASTSelfAttention(nn.Module):
def __init__(self, config: ASTConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size ** (-0.5)
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = (batch_size, -1, self.num_attention_heads, self.attention_head_size)
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return (context_layer, attention_probs)
class_skeleton:
class ASTSelfAttention(nn.Module):
def __init__(self, config: ASTConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=18, AvgCountLineBlank=4, AvgCountLineCode=12, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=1, CountClassDerived=1, CountDeclInstanceMethod=3, CountDeclInstanceVariable=7, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=58, CountLineBlank=15, CountLineCode=38, CountLineCodeDecl=23, CountLineCodeExe=32, CountLineComment=5, CountStmt=33, CountStmtDecl=21, CountStmtExe=29, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
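Row 615 projects queries, keys, and values and then splits each projection into attention heads with a view followed by a transpose. A minimal sketch of that reshape with the default 12 heads of size 64 (768 / 12):

```python
import torch

# Sketch of the head-splitting reshape used in ASTSelfAttention.forward:
# (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size).
batch_size, seq_len = 2, 1214
num_attention_heads, attention_head_size = 12, 64
hidden_states = torch.randn(batch_size, seq_len, num_attention_heads * attention_head_size)

new_shape = (batch_size, -1, num_attention_heads, attention_head_size)
per_head = hidden_states.view(*new_shape).transpose(1, 2)
print(per_head.shape)  # torch.Size([2, 12, 1214, 64])
```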
id: 616 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | class_name: transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer.ASTSelfOutput
human_written_code:
from torch import nn
from .configuration_audio_spectrogram_transformer import ASTConfig
import torch
class ASTSelfOutput(nn.Module):
"""
The residual connection is defined in ASTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: ASTConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class_skeleton:
class ASTSelfOutput(nn.Module):
'''
The residual connection is defined in ASTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
'''
def __init__(self, config: ASTConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=5, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.44, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=16, CountLineBlank=3, CountLineCode=9, CountLineCodeDecl=5, CountLineCodeExe=6, CountLineComment=4, CountStmt=9, CountStmtDecl=5, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 617 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/auto_factory.py | class_name: transformers.models.auto.auto_factory._BaseAutoBackboneClass
human_written_code:
from ...utils import CONFIG_NAME, cached_file, copy_func, extract_commit_hash, find_adapter_config_file, is_peft_available, is_torch_available, logging, requires_backends
class _BaseAutoBackboneClass(_BaseAutoModelClass):
_model_mapping = None
@classmethod
def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
requires_backends(cls, ['vision', 'timm'])
from ...models.timm_backbone import TimmBackboneConfig
config = kwargs.pop('config', TimmBackboneConfig())
if kwargs.get('out_features') is not None:
raise ValueError('Cannot specify `out_features` for timm backbones')
if kwargs.get('output_loading_info', False):
raise ValueError('Cannot specify `output_loading_info=True` when loading from timm')
num_channels = kwargs.pop('num_channels', config.num_channels)
features_only = kwargs.pop('features_only', config.features_only)
use_pretrained_backbone = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone)
out_indices = kwargs.pop('out_indices', config.out_indices)
config = TimmBackboneConfig(backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices)
return super().from_config(config, **kwargs)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
use_timm_backbone = kwargs.pop('use_timm_backbone', False)
if use_timm_backbone:
return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
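A hedged usage sketch of the timm branch above; AutoBackbone is the public subclass of this base, and the checkpoint name and indices are illustrative (needs the `timm` and `vision` backends):

```python
# Hedged sketch: with use_timm_backbone=True, from_pretrained dispatches to
# _load_timm_backbone_from_pretrained and builds a TimmBackboneConfig.
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained(
    "resnet18",                # resolved as a timm model name on this path
    use_timm_backbone=True,
    out_indices=(1, 2, 3, 4),  # forwarded into TimmBackboneConfig.out_indices
)
```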
|
class _BaseAutoBackboneClass(_BaseAutoModelClass):
@classmethod
def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
pass
| 5 | 0 | 15 | 3 | 13 | 0 | 3 | 0.03 | 1 | 3 | 1 | 1 | 0 | 0 | 2 | 6 | 37 | 7 | 29 | 13 | 23 | 1 | 21 | 11 | 17 | 3 | 1 | 1 | 5
|
618
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/auto_factory.py
|
transformers.models.auto.auto_factory._BaseAutoModelClass
|
import os
from typing import Any, TypeVar, Union
from ...utils import CONFIG_NAME, cached_file, copy_func, extract_commit_hash, find_adapter_config_file, is_peft_available, is_torch_available, logging, requires_backends
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
import json
from ...configuration_utils import PretrainedConfig
import warnings
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
import copy
class _BaseAutoModelClass:
_model_mapping = None
def __init__(self, *args, **kwargs) -> None:
raise OSError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_config(config)` methods.')
@classmethod
def from_config(cls, config, **kwargs):
trust_remote_code = kwargs.pop('trust_remote_code', None)
has_remote_code = hasattr(config, 'auto_map') and cls.__name__ in config.auto_map
has_local_code = type(config) in cls._model_mapping
if has_remote_code:
class_ref = config.auto_map[cls.__name__]
if '--' in class_ref:
upstream_repo = class_ref.split('--')[0]
else:
upstream_repo = None
trust_remote_code = resolve_trust_remote_code(trust_remote_code, config._name_or_path, has_local_code, has_remote_code, upstream_repo=upstream_repo)
if has_remote_code and trust_remote_code:
if '--' in class_ref:
repo_id, class_ref = class_ref.split('--')
else:
repo_id = config.name_or_path
model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs)
if not has_local_code:
cls.register(config.__class__, model_class, exist_ok=True)
model_class.register_for_auto_class(auto_class=cls)
_ = kwargs.pop('code_revision', None)
model_class = add_generation_mixin_to_remote_model(model_class)
return model_class._from_config(config, **kwargs)
elif type(config) in cls._model_mapping:
model_class = _get_model_class(config, cls._model_mapping)
return model_class._from_config(config, **kwargs)
raise ValueError(f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\nModel type should be one of {', '.join((c.__name__ for c in cls._model_mapping))}.")
@classmethod
def _prepare_config_for_auto_class(cls, config: PretrainedConfig) -> PretrainedConfig:
"""Additional autoclass-specific config post-loading manipulation. May be overridden in subclasses."""
return config
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike[str]], *model_args, **kwargs):
config = kwargs.pop('config', None)
trust_remote_code = kwargs.get('trust_remote_code')
kwargs['_from_auto'] = True
hub_kwargs_names = ['cache_dir', 'force_download', 'local_files_only', 'proxies', 'resume_download', 'revision', 'subfolder', 'use_auth_token', 'token']
hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs}
code_revision = kwargs.pop('code_revision', None)
commit_hash = kwargs.pop('_commit_hash', None)
adapter_kwargs = kwargs.pop('adapter_kwargs', None)
token = hub_kwargs.pop('token', None)
use_auth_token = hub_kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if token is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
token = use_auth_token
if token is not None:
hub_kwargs['token'] = token
if commit_hash is None:
if not isinstance(config, PretrainedConfig):
resolved_config_file = cached_file(pretrained_model_name_or_path, CONFIG_NAME, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, **hub_kwargs)
commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
else:
commit_hash = getattr(config, '_commit_hash', None)
if is_peft_available():
if adapter_kwargs is None:
adapter_kwargs = {}
if token is not None:
adapter_kwargs['token'] = token
maybe_adapter_path = find_adapter_config_file(pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs)
if maybe_adapter_path is not None:
with open(maybe_adapter_path, 'r', encoding='utf-8') as f:
adapter_config = json.load(f)
adapter_kwargs['_adapter_model_path'] = pretrained_model_name_or_path
pretrained_model_name_or_path = adapter_config['base_model_name_or_path']
if not isinstance(config, PretrainedConfig):
kwargs_orig = copy.deepcopy(kwargs)
if kwargs.get('torch_dtype') == 'auto':
_ = kwargs.pop('torch_dtype')
if kwargs.get('dtype') == 'auto':
_ = kwargs.pop('dtype')
if kwargs.get('quantization_config') is not None:
_ = kwargs.pop('quantization_config')
config, kwargs = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, code_revision=code_revision, _commit_hash=commit_hash, **hub_kwargs, **kwargs)
if kwargs_orig.get('torch_dtype', None) == 'auto':
kwargs['torch_dtype'] = 'auto'
if kwargs_orig.get('dtype', None) == 'auto':
kwargs['dtype'] = 'auto'
if kwargs_orig.get('quantization_config', None) is not None:
kwargs['quantization_config'] = kwargs_orig['quantization_config']
has_remote_code = hasattr(config, 'auto_map') and cls.__name__ in config.auto_map
has_local_code = type(config) in cls._model_mapping
upstream_repo = None
if has_remote_code:
class_ref = config.auto_map[cls.__name__]
if '--' in class_ref:
upstream_repo = class_ref.split('--')[0]
trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo=upstream_repo)
kwargs['trust_remote_code'] = trust_remote_code
kwargs['adapter_kwargs'] = adapter_kwargs
if has_remote_code and trust_remote_code:
model_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs)
_ = hub_kwargs.pop('code_revision', None)
if not has_local_code:
cls.register(config.__class__, model_class, exist_ok=True)
model_class.register_for_auto_class(auto_class=cls)
model_class = add_generation_mixin_to_remote_model(model_class)
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs)
elif type(config) in cls._model_mapping:
model_class = _get_model_class(config, cls._model_mapping)
if model_class.config_class == config.sub_configs.get('text_config', None):
config = config.get_text_config()
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs)
raise ValueError(f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\nModel type should be one of {', '.join((c.__name__ for c in cls._model_mapping))}.")
@classmethod
def register(cls, config_class, model_class, exist_ok=False) -> None:
"""
Register a new model for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
model_class ([`PreTrainedModel`]):
The model to register.
"""
if hasattr(model_class, 'config_class') and model_class.config_class.__name__ != config_class.__name__:
raise ValueError(f'The model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix one of those so they match!')
cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok)
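A short usage sketch of the dispatch that from_pretrained implements above (the checkpoint name is illustrative; the concrete class is looked up in `_model_mapping`):

```python
# Hedged sketch: AutoModel (a _BaseAutoModelClass subclass) resolves the config,
# maps its type to a concrete model class, then defers loading to that class.
from transformers import AutoModel

model = AutoModel.from_pretrained("google-bert/bert-base-uncased")
print(type(model).__name__)  # BertModel, picked via the config type
```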
|
class _BaseAutoModelClass:
def __init__(self, *args, **kwargs) -> None:
pass
@classmethod
def from_config(cls, config, **kwargs):
pass
@classmethod
def _prepare_config_for_auto_class(cls, config: PretrainedConfig) -> PretrainedConfig:
'''Additional autoclass-specific config post-loading manipulation. May be overridden in subclasses.'''
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike[str]], *model_args, **kwargs):
pass
@classmethod
def register(cls, config_class, model_class, exist_ok=False) -> None:
'''
Register a new model for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
model_class ([`PreTrainedModel`]):
The model to register.
'''
pass
| 10 | 2 | 43 | 4 | 36 | 4 | 6 | 0.1 | 0 | 4 | 1 | 74 | 1 | 0 | 4 | 4 | 183 | 20 | 148 | 35 | 140 | 15 | 85 | 31 | 80 | 17 | 0 | 3 | 24
|
619
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/auto_factory.py
|
transformers.models.auto.auto_factory._LazyAutoMapping
|
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
from collections.abc import Iterator
import importlib
from typing import Any, TypeVar, Union
from collections import OrderedDict
from ...configuration_utils import PretrainedConfig
class _LazyAutoMapping(OrderedDict[type[PretrainedConfig], _LazyAutoMappingValue]):
"""
" A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.
Args:
- config_mapping: The map model type to config class
- model_mapping: The map model type to model (or tokenizer) class
"""
def __init__(self, config_mapping, model_mapping) -> None:
self._config_mapping = config_mapping
self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
self._model_mapping = model_mapping
self._model_mapping._model_mapping = self
self._extra_content = {}
self._modules = {}
def __len__(self) -> int:
common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys())
return len(common_keys) + len(self._extra_content)
def __getitem__(self, key: type[PretrainedConfig]) -> _LazyAutoMappingValue:
if key in self._extra_content:
return self._extra_content[key]
model_type = self._reverse_config_mapping[key.__name__]
if model_type in self._model_mapping:
model_name = self._model_mapping[model_type]
return self._load_attr_from_module(model_type, model_name)
model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]
for mtype in model_types:
if mtype in self._model_mapping:
model_name = self._model_mapping[mtype]
return self._load_attr_from_module(mtype, model_name)
raise KeyError(key)
def _load_attr_from_module(self, model_type, attr):
module_name = model_type_to_module_name(model_type)
if module_name not in self._modules:
self._modules[module_name] = importlib.import_module(f'.{module_name}', 'transformers.models')
return getattribute_from_module(self._modules[module_name], attr)
def keys(self) -> list[type[PretrainedConfig]]:
mapping_keys = [self._load_attr_from_module(key, name) for key, name in self._config_mapping.items() if key in self._model_mapping]
return mapping_keys + list(self._extra_content.keys())
def get(self, key: type[PretrainedConfig], default: _T) -> Union[_LazyAutoMappingValue, _T]:
try:
return self.__getitem__(key)
except KeyError:
return default
def __bool__(self) -> bool:
return bool(self.keys())
def values(self) -> list[_LazyAutoMappingValue]:
mapping_values = [self._load_attr_from_module(key, name) for key, name in self._model_mapping.items() if key in self._config_mapping]
return mapping_values + list(self._extra_content.values())
def items(self) -> list[tuple[type[PretrainedConfig], _LazyAutoMappingValue]]:
mapping_items = [(self._load_attr_from_module(key, self._config_mapping[key]), self._load_attr_from_module(key, self._model_mapping[key])) for key in self._model_mapping if key in self._config_mapping]
return mapping_items + list(self._extra_content.items())
def __iter__(self) -> Iterator[type[PretrainedConfig]]:
return iter(self.keys())
def __contains__(self, item: type) -> bool:
if item in self._extra_content:
return True
if not hasattr(item, '__name__') or item.__name__ not in self._reverse_config_mapping:
return False
model_type = self._reverse_config_mapping[item.__name__]
return model_type in self._model_mapping
def register(self, key: type[PretrainedConfig], value: _LazyAutoMappingValue, exist_ok=False) -> None:
"""
Register a new model in this mapping.
"""
if hasattr(key, '__name__') and key.__name__ in self._reverse_config_mapping:
model_type = self._reverse_config_mapping[key.__name__]
if model_type in self._model_mapping and (not exist_ok):
raise ValueError(f"'{key}' is already used by a Transformers model.")
self._extra_content[key] = value
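A small sketch of how this lazy mapping is consumed in practice; MODEL_MAPPING is one concrete `_LazyAutoMapping` instance, and the lookup below triggers the import described in `_load_attr_from_module`:

```python
# Hedged sketch: indexing the lazy mapping with a config class imports the
# corresponding modeling module on first access and returns the model class.
from transformers import BertConfig
from transformers.models.auto.modeling_auto import MODEL_MAPPING

model_class = MODEL_MAPPING[BertConfig]
print(model_class.__name__)  # BertModel
```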
|
class _LazyAutoMapping(OrderedDict[type[PretrainedConfig], _LazyAutoMappingValue]):
'''
" A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.
Args:
- config_mapping: The map model type to config class
- model_mapping: The map model type to model (or tokenizer) class
'''
def __init__(self, config_mapping, model_mapping) -> None:
pass
def __len__(self) -> int:
pass
def __getitem__(self, key: type[PretrainedConfig]) -> _LazyAutoMappingValue:
pass
def _load_attr_from_module(self, model_type, attr):
pass
def keys(self) -> list[type[PretrainedConfig]]:
pass
def get(self, key: type[PretrainedConfig], default: _T) -> Union[_LazyAutoMappingValue, _T]:
pass
def __bool__(self) -> bool:
pass
def values(self) -> list[_LazyAutoMappingValue]:
pass
def items(self) -> list[tuple[type[PretrainedConfig], _LazyAutoMappingValue]]:
pass
def __iter__(self) -> Iterator[type[PretrainedConfig]]:
pass
def __contains__(self, item: type) -> bool:
pass
def register(self, key: type[PretrainedConfig], value: _LazyAutoMappingValue, exist_ok=False) -> None:
'''
Register a new model in this mapping.
'''
pass
| 13 | 2 | 7 | 0 | 6 | 0 | 2 | 0.13 | 1 | 5 | 0 | 0 | 12 | 5 | 12 | 62 | 100 | 15 | 75 | 29 | 62 | 10 | 60 | 29 | 47 | 5 | 3 | 2 | 22
|
620
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/configuration_auto.py
|
transformers.models.auto.configuration_auto.AutoConfig
|
from ...configuration_utils import PretrainedConfig
from ...utils import CONFIG_NAME, logging
import warnings
import os
from typing import Any, TypeVar, Union
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
class AutoConfig:
"""
This is a generic configuration class that will be instantiated as one of the configuration classes of the library
when created with the [`~AutoConfig.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self) -> None:
raise OSError('AutoConfig is designed to be instantiated using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
def for_model(cls, model_type: str, *args, **kwargs) -> PretrainedConfig:
if model_type in CONFIG_MAPPING:
config_class = CONFIG_MAPPING[model_type]
return config_class(*args, **kwargs)
raise ValueError(f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}")
@classmethod
@replace_list_option_in_docstrings()
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike[str]], **kwargs):
"""
Instantiate one of the configuration classes of the library from a pretrained model configuration.
The configuration class to instantiate is selected based on the `model_type` property of the config object that
is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- A path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method,
e.g., `./my_model_directory/`.
- A path or url to a saved configuration JSON *file*, e.g.,
`./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force a (re-)download of the model weights and configuration files and override the
cached versions if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs(additional keyword arguments, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Examples:
```python
>>> from transformers import AutoConfig
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased")
>>> # Download configuration from huggingface.co (user-uploaded) and cache.
>>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased")
>>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*).
>>> config = AutoConfig.from_pretrained("./test/bert_saved_model/")
>>> # Load a specific configuration file.
>>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json")
>>> # Change some config attributes when loading a pretrained config.
>>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
>>> config.output_attentions
True
>>> config, unused_kwargs = AutoConfig.from_pretrained(
... "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
... )
>>> config.output_attentions
True
>>> unused_kwargs
{'foo': False}
```
"""
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if kwargs.get('token') is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
kwargs['token'] = use_auth_token
kwargs['_from_auto'] = True
kwargs['name_or_path'] = pretrained_model_name_or_path
trust_remote_code = kwargs.pop('trust_remote_code', None)
code_revision = kwargs.pop('code_revision', None)
config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
has_remote_code = 'auto_map' in config_dict and 'AutoConfig' in config_dict['auto_map']
has_local_code = 'model_type' in config_dict and config_dict['model_type'] in CONFIG_MAPPING
if has_remote_code:
class_ref = config_dict['auto_map']['AutoConfig']
if '--' in class_ref:
upstream_repo = class_ref.split('--')[0]
else:
upstream_repo = None
trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo)
if has_remote_code and trust_remote_code:
config_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs)
config_class.register_for_auto_class()
return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif 'model_type' in config_dict:
if config_dict['model_type'] == 'mistral' and 'layer_types' in config_dict:
logger.info('Detected mistral model with layer_types, treating as ministral for alternating attention compatibility. ')
config_dict['model_type'] = 'ministral'
try:
config_class = CONFIG_MAPPING[config_dict['model_type']]
except KeyError:
raise ValueError(f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` but Transformers does not recognize this architecture. This could be because of an issue with the checkpoint, or because your version of Transformers is out of date.\n\nYou can update Transformers with the command `pip install --upgrade transformers`. If this does not work, and the checkpoint is very new, then there may not be a release version that supports this model yet. In this case, you can get the most up-to-date code by installing Transformers from source with the command `pip install git+https://github.com/huggingface/transformers.git`")
return config_class.from_dict(config_dict, **unused_kwargs)
else:
for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True):
if pattern in str(pretrained_model_name_or_path):
return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs)
raise ValueError(f"Unrecognized model in {pretrained_model_name_or_path}. Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings in its name: {', '.join(CONFIG_MAPPING.keys())}")
@staticmethod
def register(model_type, config, exist_ok=False) -> None:
"""
Register a new configuration for this class.
Args:
model_type (`str`): The model type like "bert" or "gpt".
config ([`PretrainedConfig`]): The config to register.
"""
if issubclass(config, PretrainedConfig) and config.model_type != model_type:
raise ValueError(f'The config you are passing has a `model_type` attribute that is not consistent with the model type you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they match!')
CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok)
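A hedged sketch of the `register`/`for_model` pair defined above; the model type and config class below are illustrative, not part of the library:

```python
# Hedged sketch: registering a custom config makes it resolvable through the
# CONFIG_MAPPING that AutoConfig consults.
from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type

AutoConfig.register("my-model", MyConfig)
config = AutoConfig.for_model("my-model", hidden_size=128)
print(type(config).__name__, config.hidden_size)  # MyConfig 128
```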
|
class AutoConfig:
'''
This is a generic configuration class that will be instantiated as one of the configuration classes of the library
when created with the [`~AutoConfig.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
'''
def __init__(self) -> None:
pass
@classmethod
def for_model(cls, model_type: str, *args, **kwargs) -> PretrainedConfig:
pass
@classmethod
@replace_list_option_in_docstrings()
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike[str]], **kwargs):
'''
Instantiate one of the configuration classes of the library from a pretrained model configuration.
The configuration class to instantiate is selected based on the `model_type` property of the config object that
is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- A path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method,
e.g., `./my_model_directory/`.
- A path or url to a saved configuration JSON *file*, e.g.,
`./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force a (re-)download of the model weights and configuration files and override the
cached versions if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs(additional keyword arguments, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Examples:
```python
>>> from transformers import AutoConfig
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased")
>>> # Download configuration from huggingface.co (user-uploaded) and cache.
>>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased")
>>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*).
>>> config = AutoConfig.from_pretrained("./test/bert_saved_model/")
>>> # Load a specific configuration file.
>>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json")
>>> # Change some config attributes when loading a pretrained config.
>>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
>>> config.output_attentions
True
>>> config, unused_kwargs = AutoConfig.from_pretrained(
... "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
... )
>>> config.output_attentions
True
>>> unused_kwargs
{'foo': False}
```
'''
pass
@staticmethod
def register(model_type, config, exist_ok=False) -> None:
'''
Register a new configuration for this class.
Args:
model_type (`str`): The model type like "bert" or "gpt".
config ([`PretrainedConfig`]): The config to register.
'''
pass
| 9 | 3 | 42 | 5 | 19 | 19 | 4 | 1.01 | 0 | 4 | 0 | 0 | 1 | 0 | 4 | 4 | 183 | 24 | 79 | 18 | 70 | 80 | 42 | 15 | 37 | 9 | 0 | 3 | 14
|
621
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/configuration_auto.py
|
transformers.models.auto.configuration_auto._LazyConfigMapping
|
from collections.abc import Callable, Iterator, KeysView, ValuesView
import importlib
from collections import OrderedDict
from ...configuration_utils import PretrainedConfig
class _LazyConfigMapping(OrderedDict[str, type[PretrainedConfig]]):
"""
A dictionary that lazily loads its values when they are requested.
"""
def __init__(self, mapping) -> None:
self._mapping = mapping
self._extra_content = {}
self._modules = {}
def __getitem__(self, key: str) -> type[PretrainedConfig]:
if key in self._extra_content:
return self._extra_content[key]
if key not in self._mapping:
raise KeyError(key)
value = self._mapping[key]
module_name = model_type_to_module_name(key)
if module_name not in self._modules:
self._modules[module_name] = importlib.import_module(f'.{module_name}', 'transformers.models')
if hasattr(self._modules[module_name], value):
return getattr(self._modules[module_name], value)
transformers_module = importlib.import_module('transformers')
return getattr(transformers_module, value)
def keys(self) -> list[str]:
return list(self._mapping.keys()) + list(self._extra_content.keys())
def values(self) -> list[type[PretrainedConfig]]:
return [self[k] for k in self._mapping] + list(self._extra_content.values())
def items(self) -> list[tuple[str, type[PretrainedConfig]]]:
return [(k, self[k]) for k in self._mapping] + list(self._extra_content.items())
def __iter__(self) -> Iterator[str]:
return iter(list(self._mapping.keys()) + list(self._extra_content.keys()))
def __contains__(self, item: object) -> bool:
return item in self._mapping or item in self._extra_content
def register(self, key: str, value: type[PretrainedConfig], exist_ok=False) -> None:
"""
Register a new configuration in this mapping.
"""
if key in self._mapping and (not exist_ok):
raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.")
self._extra_content[key] = value
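A short sketch of this lazy lookup in use; CONFIG_MAPPING is the module-level `_LazyConfigMapping` instance:

```python
# Hedged sketch: reading a key lazily imports transformers.models.<module_name>
# and returns the configuration class named in the mapping.
from transformers.models.auto.configuration_auto import CONFIG_MAPPING

config_class = CONFIG_MAPPING["bert"]
print(config_class.__name__)  # BertConfig
```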
|
class _LazyConfigMapping(OrderedDict[str, type[PretrainedConfig]]):
'''
A dictionary that lazily loads its values when they are requested.
'''
def __init__(self, mapping) -> None:
pass
def __getitem__(self, key: str) -> type[PretrainedConfig]:
pass
def keys(self) -> list[str]:
pass
def values(self) -> list[type[PretrainedConfig]]:
pass
def items(self) -> list[tuple[str, type[PretrainedConfig]]]:
pass
def __iter__(self) -> Iterator[str]:
pass
def __contains__(self, item: object) -> bool:
pass
def register(self, key: str, value: type[PretrainedConfig], exist_ok=False) -> None:
'''
Register a new configuration in this mapping.
'''
pass
| 9 | 2 | 5 | 0 | 4 | 1 | 2 | 0.25 | 1 | 3 | 0 | 0 | 8 | 3 | 8 | 58 | 49 | 9 | 32 | 15 | 23 | 8 | 32 | 15 | 23 | 5 | 3 | 1 | 13
|
622
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/configuration_auto.py
|
transformers.models.auto.configuration_auto._LazyLoadAllMappings
|
from collections import OrderedDict
from collections.abc import Callable, ItemsView, Iterator, KeysView, ValuesView
import importlib
class _LazyLoadAllMappings(OrderedDict[str, str]):
"""
A mapping that will load all of its key/value pairs at the first access (either by indexing, requesting keys, values,
etc.)
Args:
mapping: The mapping to load.
"""
def __init__(self, mapping):
self._mapping = mapping
self._initialized = False
self._data = {}
def _initialize(self):
if self._initialized:
return
for model_type, map_name in self._mapping.items():
module_name = model_type_to_module_name(model_type)
module = importlib.import_module(f'.{module_name}', 'transformers.models')
mapping = getattr(module, map_name)
self._data.update(mapping)
self._initialized = True
def __getitem__(self, key):
self._initialize()
return self._data[key]
def keys(self) -> KeysView[str]:
self._initialize()
return self._data.keys()
def values(self) -> ValuesView[str]:
self._initialize()
return self._data.values()
def items(self) -> ItemsView[str, str]:
self._initialize()
return self._data.items()
def __iter__(self) -> Iterator[str]:
self._initialize()
return iter(self._data)
def __contains__(self, item: object) -> bool:
self._initialize()
return item in self._data
|
class _LazyLoadAllMappings(OrderedDict[str, str]):
'''
A mapping that will load all of its key/value pairs at the first access (either by indexing, requesting keys, values,
etc.)
Args:
mapping: The mapping to load.
'''
def __init__(self, mapping):
pass
def _initialize(self):
pass
def __getitem__(self, key):
pass
def keys(self) -> KeysView[str]:
pass
def values(self) -> ValuesView[str]:
pass
def items(self) -> ItemsView[str, str]:
pass
def __iter__(self) -> Iterator[str]:
pass
def __contains__(self, item: object) -> bool:
pass
| 9 | 1 | 4 | 0 | 4 | 0 | 1 | 0.19 | 1 | 0 | 0 | 0 | 8 | 3 | 8 | 58 | 49 | 11 | 32 | 16 | 23 | 6 | 32 | 16 | 23 | 3 | 3 | 1 | 10
|
623
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/feature_extraction_auto.py
|
transformers.models.auto.feature_extraction_auto.AutoFeatureExtractor
|
from .configuration_auto import CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
import warnings
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, cached_file, logging
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...configuration_utils import PretrainedConfig
class AutoFeatureExtractor:
"""
This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the
library when created with the [`AutoFeatureExtractor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise OSError('AutoFeatureExtractor is designed to be instantiated using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
"""
Instantiate one of the feature extractor classes of the library from a pretrained model vocabulary.
The feature extractor class to instantiate is selected based on the `model_type` property of the config object
(either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a feature extractor file saved using the
[`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved feature extractor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force a (re-)download of the feature extractor files and override the cached versions
if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final feature extractor object. If `True`, then this
function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
`kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoFeatureExtractor
>>> # Download feature extractor from huggingface.co and cache.
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
>>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*)
>>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/")
```"""
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if kwargs.get('token') is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
kwargs['token'] = use_auth_token
config = kwargs.pop('config', None)
trust_remote_code = kwargs.pop('trust_remote_code', None)
kwargs['_from_auto'] = True
config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
feature_extractor_class = config_dict.get('feature_extractor_type', None)
feature_extractor_auto_map = None
if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
feature_extractor_class = getattr(config, 'feature_extractor_type', None)
if hasattr(config, 'auto_map') and 'AutoFeatureExtractor' in config.auto_map:
feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
has_remote_code = feature_extractor_auto_map is not None
has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
if has_remote_code:
if '--' in feature_extractor_auto_map:
upstream_repo = feature_extractor_auto_map.split('--')[0]
else:
upstream_repo = None
trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo)
if has_remote_code and trust_remote_code:
feature_extractor_class = get_class_from_dynamic_module(feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
_ = kwargs.pop('code_revision', None)
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(config_dict, **kwargs)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(config_dict, **kwargs)
elif type(config) in FEATURE_EXTRACTOR_MAPPING:
feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
return feature_extractor_class.from_dict(config_dict, **kwargs)
raise ValueError(f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a `feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: {', '.join((c for c in FEATURE_EXTRACTOR_MAPPING_NAMES))}")
@staticmethod
def register(config_class, feature_extractor_class, exist_ok=False):
"""
Register a new feature extractor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
feature_extractor_class ([`FeatureExtractorMixin`]): The feature extractor to register.
"""
FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok)
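A hedged sketch of the `register` hook above; the config and feature extractor classes below are illustrative stand-ins, not real library classes:

```python
# Hedged sketch: pair a custom config with a custom feature extractor so the
# auto class can resolve it. Both classes here are hypothetical.
from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin

class MyConfig(PretrainedConfig):
    model_type = "my-audio-model"

class MyFeatureExtractor(FeatureExtractionMixin):
    pass

AutoConfig.register("my-audio-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
```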
|
class AutoFeatureExtractor:
'''
This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the
library when created with the [`AutoFeatureExtractor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
'''
def __init__(self):
pass
@classmethod
@replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
'''
Instantiate one of the feature extractor classes of the library from a pretrained model vocabulary.
The feature extractor class to instantiate is selected based on the `model_type` property of the config object
(either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a feature extractor file saved using the
[`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved feature extractor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force a (re-)download of the feature extractor files and override the cached versions
if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final feature extractor object. If `True`, then this
function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
`kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoFeatureExtractor
>>> # Download feature extractor from huggingface.co and cache.
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
>>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*)
>>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/")
```'''
pass
@staticmethod
def register(config_class, feature_extractor_class, exist_ok=False):
'''
Register a new feature extractor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
feature_extractor_class ([`FeatureExtractorMixin`]): The feature extractor to register.
'''
pass
| 7 | 3 | 49 | 6 | 20 | 23 | 5 | 1.16 | 0 | 5 | 2 | 0 | 1 | 0 | 3 | 3 | 161 | 23 | 64 | 14 | 57 | 74 | 41 | 12 | 37 | 12 | 0 | 2 | 14
|
624
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/image_processing_auto.py
|
transformers.models.auto.image_processing_auto.AutoImageProcessor
|
from ...utils.import_utils import requires
from .configuration_auto import CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, cached_file, is_timm_config_dict, is_timm_local_checkpoint, is_torchvision_available, is_vision_available, logging
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
import warnings
from ...image_processing_utils_fast import BaseImageProcessorFast
@requires(backends=('vision',))
class AutoImageProcessor:
"""
This is a generic image processor class that will be instantiated as one of the image processor classes of the
library when created with the [`AutoImageProcessor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise OSError('AutoImageProcessor is designed to be instantiated using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate one of the image processor classes of the library from a pretrained model vocabulary.
The image processor class to instantiate is selected based on the `model_type` property of the config object
(either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained image_processor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a image processor file saved using the
[`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved image processor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model image processor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force a (re-)download of the image processor files and override the cached versions if
they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
use_fast (`bool`, *optional*, defaults to `False`):
Use a fast torchvision-based image processor if it is supported for a given model.
If a fast image processor is not available for a given model, a normal numpy-based image processor
is returned instead.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final image processor object. If `True`, then this
function returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of
`kwargs` which has not been used to update `image_processor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
image_processor_filename (`str`, *optional*, defaults to `"config.json"`):
The name of the file in the model directory to use for the image processor config.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are image processor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoImageProcessor
>>> # Download image processor from huggingface.co and cache.
>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
>>> # If image processor files are in a directory (e.g. image processor was saved using *save_pretrained('./test/saved_model/')*)
>>> # image_processor = AutoImageProcessor.from_pretrained("./test/saved_model/")
```"""
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if kwargs.get('token') is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
kwargs['token'] = use_auth_token
config = kwargs.pop('config', None)
use_fast = kwargs.pop('use_fast', None)
trust_remote_code = kwargs.pop('trust_remote_code', None)
kwargs['_from_auto'] = True
if 'image_processor_filename' in kwargs:
image_processor_filename = kwargs.pop('image_processor_filename')
elif is_timm_local_checkpoint(pretrained_model_name_or_path):
image_processor_filename = CONFIG_NAME
else:
image_processor_filename = IMAGE_PROCESSOR_NAME
try:
config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, image_processor_filename=image_processor_filename, **kwargs)
except Exception as initial_exception:
try:
config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, image_processor_filename=CONFIG_NAME, **kwargs)
except Exception:
raise initial_exception
if not is_timm_config_dict(config_dict):
raise initial_exception
image_processor_type = config_dict.get('image_processor_type', None)
image_processor_auto_map = None
if 'AutoImageProcessor' in config_dict.get('auto_map', {}):
image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']
if image_processor_type is None and image_processor_auto_map is None:
feature_extractor_class = config_dict.pop('feature_extractor_type', None)
if feature_extractor_class is not None:
image_processor_type = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')
if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')
if image_processor_type is None and image_processor_auto_map is None:
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
image_processor_type = getattr(config, 'image_processor_type', None)
if hasattr(config, 'auto_map') and 'AutoImageProcessor' in config.auto_map:
image_processor_auto_map = config.auto_map['AutoImageProcessor']
image_processor_class = None
if image_processor_type is not None:
if use_fast is None:
use_fast = image_processor_type.endswith('Fast')
if not use_fast and image_processor_type in FORCE_FAST_IMAGE_PROCESSOR and is_torchvision_available():
use_fast = True
logger.warning_once(f'The image processor of type `{image_processor_type}` is now loaded as a fast processor by default, even if the model checkpoint was saved with a slow processor. This is a breaking change and may produce slightly different outputs. To continue using the slow processor, instantiate this class with `use_fast=False`. Note that this behavior will be extended to all models in a future release.')
if not use_fast:
logger.warning_once("Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.")
if use_fast and (not image_processor_type.endswith('Fast')):
image_processor_type += 'Fast'
if use_fast and (not is_torchvision_available()):
image_processor_class = get_image_processor_class_from_name(image_processor_type[:-4])
if image_processor_class is None:
raise ValueError(f'`{image_processor_type}` requires `torchvision` to be installed. Please install `torchvision` and try again.')
logger.warning_once('Using `use_fast=True` but `torchvision` is not available. Falling back to the slow image processor.')
use_fast = False
if use_fast:
for image_processors in IMAGE_PROCESSOR_MAPPING_NAMES.values():
if image_processor_type in image_processors:
break
else:
image_processor_type = image_processor_type[:-4]
use_fast = False
logger.warning_once('`use_fast` is set to `True` but the image processor class does not have a fast version. Falling back to the slow version.')
image_processor_class = get_image_processor_class_from_name(image_processor_type)
else:
image_processor_type_slow = image_processor_type[:-4] if image_processor_type.endswith('Fast') else image_processor_type
image_processor_class = get_image_processor_class_from_name(image_processor_type_slow)
if image_processor_class is None and image_processor_type.endswith('Fast'):
raise ValueError(f'`{image_processor_type}` does not have a slow version. Please set `use_fast=True` when instantiating the processor.')
has_remote_code = image_processor_auto_map is not None
has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
if has_remote_code:
if image_processor_auto_map is not None and (not isinstance(image_processor_auto_map, tuple)):
image_processor_auto_map = (image_processor_auto_map, None)
if use_fast and image_processor_auto_map[1] is not None:
class_ref = image_processor_auto_map[1]
else:
class_ref = image_processor_auto_map[0]
if '--' in class_ref:
upstream_repo = class_ref.split('--')[0]
else:
upstream_repo = None
trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo)
if has_remote_code and trust_remote_code:
if not use_fast and image_processor_auto_map[1] is not None:
_warning_fast_image_processor_available(image_processor_auto_map[1])
image_processor_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
_ = kwargs.pop('code_revision', None)
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(config_dict, **kwargs)
elif image_processor_class is not None:
return image_processor_class.from_dict(config_dict, **kwargs)
elif type(config) in IMAGE_PROCESSOR_MAPPING:
image_processor_tuple = IMAGE_PROCESSOR_MAPPING[type(config)]
image_processor_class_py, image_processor_class_fast = image_processor_tuple
if not use_fast and image_processor_class_fast is not None:
_warning_fast_image_processor_available(image_processor_class_fast)
if image_processor_class_fast and (use_fast or image_processor_class_py is None):
return image_processor_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif image_processor_class_py is not None:
return image_processor_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError('This image processor cannot be instantiated. Please make sure you have `Pillow` installed.')
raise ValueError(f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a `image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: {', '.join((c for c in IMAGE_PROCESSOR_MAPPING_NAMES))}")
@staticmethod
def register(config_class, image_processor_class=None, slow_image_processor_class=None, fast_image_processor_class=None, exist_ok=False):
"""
Register a new image processor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
image_processor_class ([`ImageProcessingMixin`]): The image processor to register.
"""
if image_processor_class is not None:
if slow_image_processor_class is not None:
raise ValueError('Cannot specify both image_processor_class and slow_image_processor_class')
warnings.warn('The image_processor_class argument is deprecated and will be removed in v4.42. Please use `slow_image_processor_class`, or `fast_image_processor_class` instead', FutureWarning)
slow_image_processor_class = image_processor_class
if slow_image_processor_class is None and fast_image_processor_class is None:
raise ValueError('You need to specify either slow_image_processor_class or fast_image_processor_class')
if slow_image_processor_class is not None and issubclass(slow_image_processor_class, BaseImageProcessorFast):
raise ValueError('You passed a fast image processor in as the `slow_image_processor_class`.')
if fast_image_processor_class is not None and (not issubclass(fast_image_processor_class, BaseImageProcessorFast)):
raise ValueError('The `fast_image_processor_class` should inherit from `BaseImageProcessorFast`.')
if slow_image_processor_class is not None and fast_image_processor_class is not None and issubclass(fast_image_processor_class, BaseImageProcessorFast) and (fast_image_processor_class.slow_image_processor_class != slow_image_processor_class):
raise ValueError(f'The fast processor class you are passing has a `slow_image_processor_class` attribute that is not consistent with the slow processor class you passed (fast processor has {fast_image_processor_class.slow_image_processor_class} and you passed {slow_image_processor_class}). Fix one of those so they match!')
if config_class in IMAGE_PROCESSOR_MAPPING._extra_content:
existing_slow, existing_fast = IMAGE_PROCESSOR_MAPPING[config_class]
if slow_image_processor_class is None:
slow_image_processor_class = existing_slow
if fast_image_processor_class is None:
fast_image_processor_class = existing_fast
IMAGE_PROCESSOR_MAPPING.register(config_class, (slow_image_processor_class, fast_image_processor_class), exist_ok=exist_ok)
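To make the consistency checks in `register` concrete, here is a hypothetical sketch of registering a custom slow/fast pair; `MyConfig`, `MyImageProcessor`, and `MyImageProcessorFast` are illustrative names, and the fast variant assumes `torchvision` is available:

```python
from transformers import AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor
from transformers.image_processing_utils_fast import BaseImageProcessorFast

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type

class MyImageProcessor(BaseImageProcessor):
    pass  # slow, numpy/PIL-based implementation

class MyImageProcessorFast(BaseImageProcessorFast):
    slow_image_processor_class = MyImageProcessor  # must match the slow class, or register() raises

AutoImageProcessor.register(
    MyConfig,
    slow_image_processor_class=MyImageProcessor,
    fast_image_processor_class=MyImageProcessorFast,
)
```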
|
@requires(backends=('vision',))
class AutoImageProcessor:
'''
This is a generic image processor class that will be instantiated as one of the image processor classes of the
library when created with the [`AutoImageProcessor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
'''
def __init__(self):
pass
@classmethod
@replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
'''
Instantiate one of the image processor classes of the library from a pretrained model vocabulary.
The image processor class to instantiate is selected based on the `model_type` property of the config object
(either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained image_processor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing an image processor file saved using the
[`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved image processor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model image processor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the image processor files and override the cached versions if
they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
use_fast (`bool`, *optional*, defaults to `False`):
Use a fast torchvision-based image processor if it is supported for a given model.
If a fast image processor is not available for a given model, a normal numpy-based image processor
is returned instead.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final image processor object. If `True`, then this
function returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of
`kwargs` which has not been used to update `image_processor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
image_processor_filename (`str`, *optional*, defaults to `"config.json"`):
The name of the file in the model directory to use for the image processor config.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are image processor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoImageProcessor
>>> # Download image processor from huggingface.co and cache.
>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
>>> # If image processor files are in a directory (e.g. image processor was saved using *save_pretrained('./test/saved_model/')*)
>>> # image_processor = AutoImageProcessor.from_pretrained("./test/saved_model/")
```'''
pass
@staticmethod
def register(config_class, image_processor_class=None, slow_image_processor_class=None, fast_image_processor_class=None, exist_ok=False):
'''
Register a new image processor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
image_processor_class ([`ImageProcessingMixin`]): The image processor to register.
'''
pass
| 8
| 3
| 100
| 11
| 59
| 31
| 14
| 0.54
| 0
| 9
| 4
| 0
| 1
| 0
| 3
| 3
| 314
| 36
| 181
| 31
| 168
| 97
| 114
| 22
| 110
| 32
| 0
| 4
| 43
|
625
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoBackbone
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoBackbone(_BaseAutoBackboneClass):
_model_mapping = MODEL_FOR_BACKBONE_MAPPING
|
class AutoBackbone(_BaseAutoBackboneClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 2
| 0
| 0
|
626
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModel
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModel(_BaseAutoModelClass):
_model_mapping = MODEL_MAPPING
|
class AutoModel(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
627
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForAudioClassification
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForAudioClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
|
class AutoModelForAudioClassification(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
628
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForAudioFrameClassification
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForAudioFrameClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING
|
class AutoModelForAudioFrameClassification(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
629
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForAudioXVector
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForAudioXVector(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_AUDIO_XVECTOR_MAPPING
|
class AutoModelForAudioXVector(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
630
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForCTC
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForCTC(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_CTC_MAPPING
|
class AutoModelForCTC(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
631
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForCausalLM
|
import os
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from typing import TYPE_CHECKING, Union
class AutoModelForCausalLM(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
@classmethod
def from_pretrained(cls: type['AutoModelForCausalLM'], pretrained_model_name_or_path: Union[str, os.PathLike[str]], *model_args, **kwargs) -> '_BaseModelWithGenerate':
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
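The override above only narrows the return annotation to a generate-capable base class, so static type checkers know the loaded model exposes `generate`. A short usage sketch; the tiny checkpoint id is illustrative and `torch` is assumed to be installed:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "sshleifer/tiny-gpt2"  # illustrative tiny checkpoint, handy for smoke tests
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

inputs = tokenizer("Hello", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=5)  # the annotation tells checkers this method exists
print(tokenizer.decode(output_ids[0]))
```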
|
class AutoModelForCausalLM(_BaseAutoModelClass):
@classmethod
def from_pretrained(cls: type['AutoModelForCausalLM'], pretrained_model_name_or_path: Union[str, os.PathLike[str]], *model_args, **kwargs) -> '_BaseModelWithGenerate':
pass
| 3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
632
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForDepthEstimation
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForDepthEstimation(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
|
class AutoModelForDepthEstimation(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
633
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForDocumentQuestionAnswering
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
|
class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
634
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForImageClassification
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForImageClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
|
class AutoModelForImageClassification(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
635
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForImageSegmentation
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForImageSegmentation(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING
|
class AutoModelForImageSegmentation(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
636
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForImageTextToText
|
from typing import TYPE_CHECKING, Union
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
import os
class AutoModelForImageTextToText(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING
@classmethod
def from_pretrained(cls: type['AutoModelForImageTextToText'], pretrained_model_name_or_path: Union[str, os.PathLike[str]], *model_args, **kwargs) -> '_BaseModelWithGenerate':
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
|
class AutoModelForImageTextToText(_BaseAutoModelClass):
@classmethod
def from_pretrained(cls: type['AutoModelForImageTextToText'], pretrained_model_name_or_path: Union[str, os.PathLike[str]], *model_args, **kwargs) -> '_BaseModelWithGenerate':
pass
| 3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
637
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForImageToImage
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForImageToImage(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING
|
class AutoModelForImageToImage(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
638
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForInstanceSegmentation
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForInstanceSegmentation(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING
|
class AutoModelForInstanceSegmentation(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
639
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForKeypointDetection
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForKeypointDetection(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_KEYPOINT_DETECTION_MAPPING
|
class AutoModelForKeypointDetection(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
640
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForMaskGeneration
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForMaskGeneration(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MASK_GENERATION_MAPPING
|
class AutoModelForMaskGeneration(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
641
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForMaskedImageModeling
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForMaskedImageModeling(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
|
class AutoModelForMaskedImageModeling(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
642
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForMaskedLM
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForMaskedLM(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MASKED_LM_MAPPING
|
class AutoModelForMaskedLM(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
643
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForMultipleChoice
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForMultipleChoice(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING
|
class AutoModelForMultipleChoice(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
644
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForNextSentencePrediction
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForNextSentencePrediction(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
|
class AutoModelForNextSentencePrediction(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
645
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForObjectDetection
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForObjectDetection(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
|
class AutoModelForObjectDetection(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
646
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForPreTraining
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForPreTraining(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_PRETRAINING_MAPPING
|
class AutoModelForPreTraining(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
647
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForQuestionAnswering
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForQuestionAnswering(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
|
class AutoModelForQuestionAnswering(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
648
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForSemanticSegmentation
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForSemanticSegmentation(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
|
class AutoModelForSemanticSegmentation(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
649
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForSeq2SeqLM
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForSeq2SeqLM(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
|
class AutoModelForSeq2SeqLM(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
650
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForSequenceClassification
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForSequenceClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
|
class AutoModelForSequenceClassification(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
651
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForSpeechSeq2Seq
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
|
class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
652
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForTableQuestionAnswering
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForTableQuestionAnswering(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
|
class AutoModelForTableQuestionAnswering(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
653
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForTextEncoding
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForTextEncoding(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TEXT_ENCODING_MAPPING
|
class AutoModelForTextEncoding(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
654
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForTextToSpectrogram
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForTextToSpectrogram(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
|
class AutoModelForTextToSpectrogram(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
655
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForTextToWaveform
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForTextToWaveform(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING
|
class AutoModelForTextToWaveform(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
656
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForTokenClassification
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForTokenClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
|
class AutoModelForTokenClassification(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
657
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForUniversalSegmentation
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForUniversalSegmentation(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING
|
class AutoModelForUniversalSegmentation(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
658
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForVideoClassification
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForVideoClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
|
class AutoModelForVideoClassification(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
659
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForVision2Seq
|
import warnings
class AutoModelForVision2Seq(_AutoModelForVision2Seq):
@classmethod
def from_config(cls, config, **kwargs):
warnings.warn('The class `AutoModelForVision2Seq` is deprecated and will be removed in v5.0. Please use `AutoModelForImageTextToText` instead.', FutureWarning)
return super().from_config(config, **kwargs)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
warnings.warn('The class `AutoModelForVision2Seq` is deprecated and will be removed in v5.0. Please use `AutoModelForImageTextToText` instead.', FutureWarning)
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
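As the warning says, the class only forwards to its parent after emitting a `FutureWarning`, so the migration is a one-line change. A minimal sketch with a purely illustrative checkpoint id:

```python
from transformers import AutoModelForImageTextToText

# replaces: AutoModelForVision2Seq.from_pretrained("llava-hf/llava-1.5-7b-hf")
model = AutoModelForImageTextToText.from_pretrained("llava-hf/llava-1.5-7b-hf")
```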
|
class AutoModelForVision2Seq(_AutoModelForVision2Seq):
@classmethod
def from_config(cls, config, **kwargs):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
pass
| 5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
660
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForVisualQuestionAnswering
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
|
class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
661
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForZeroShotImageClassification
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForZeroShotImageClassification(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
|
class AutoModelForZeroShotImageClassification(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
662
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelForZeroShotObjectDetection
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
|
class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
663
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto.AutoModelWithLMHead
|
import warnings
class AutoModelWithLMHead(_AutoModelWithLMHead):
@classmethod
def from_config(cls, config, **kwargs):
warnings.warn('The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.', FutureWarning)
return super().from_config(config, **kwargs)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
warnings.warn('The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.', FutureWarning)
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
|
class AutoModelWithLMHead(_AutoModelWithLMHead):
@classmethod
def from_config(cls, config, **kwargs):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
pass
| 5
| 0
| 8
| 0
| 8
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 0
| 0
| 2
| 6
| 20
| 1
| 19
| 5
| 14
| 0
| 7
| 3
| 4
| 1
| 2
| 0
| 2
|
664
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/modeling_auto.py
|
transformers.models.auto.modeling_auto._AutoModelWithLMHead
|
from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
class _AutoModelWithLMHead(_BaseAutoModelClass):
_model_mapping = MODEL_WITH_LM_HEAD_MAPPING
|
class _AutoModelWithLMHead(_BaseAutoModelClass):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 4
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 1
| 0
| 0
|
665
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/processing_auto.py
|
transformers.models.auto.processing_auto.AutoProcessor
|
import warnings
import inspect
from .configuration_auto import CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
from ...video_processing_utils import BaseVideoProcessor
from ...image_processing_utils import ImageProcessingMixin
from ...configuration_utils import PretrainedConfig
import json
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...tokenization_utils import TOKENIZER_CONFIG_FILE
from ...feature_extraction_utils import FeatureExtractionMixin
from ...processing_utils import ProcessorMixin
from .image_processing_auto import AutoImageProcessor
from ...utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, VIDEO_PROCESSOR_NAME, cached_file, logging
from .tokenization_auto import AutoTokenizer
from .feature_extraction_auto import AutoFeatureExtractor
class AutoProcessor:
"""
This is a generic processor class that will be instantiated as one of the processor classes of the library when
created with the [`AutoProcessor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise OSError('AutoProcessor is designed to be instantiated using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
"""
Instantiate one of the processor classes of the library from a pretrained model vocabulary.
The processor class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible):
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing processor files saved using the `save_pretrained()` method,
e.g., `./my_model_directory/`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the feature extractor files and override the cached versions
if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final feature extractor object. If `True`, then this
function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
`kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoProcessor
>>> # Download processor from huggingface.co and cache.
>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
>>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*)
>>> # processor = AutoProcessor.from_pretrained("./test/saved_model/")
```"""
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if kwargs.get('token') is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
kwargs['token'] = use_auth_token
config = kwargs.pop('config', None)
trust_remote_code = kwargs.pop('trust_remote_code', None)
kwargs['_from_auto'] = True
processor_class = None
processor_auto_map = None
cached_file_kwargs = {key: kwargs[key] for key in inspect.signature(cached_file).parameters if key in kwargs}
cached_file_kwargs.update({'_raise_exceptions_for_gated_repo': False, '_raise_exceptions_for_missing_entries': False, '_raise_exceptions_for_connection_errors': False})
processor_config_file = cached_file(pretrained_model_name_or_path, PROCESSOR_NAME, **cached_file_kwargs)
if processor_config_file is not None:
config_dict, _ = ProcessorMixin.get_processor_dict(pretrained_model_name_or_path, **kwargs)
processor_class = config_dict.get('processor_class', None)
if 'AutoProcessor' in config_dict.get('auto_map', {}):
processor_auto_map = config_dict['auto_map']['AutoProcessor']
if processor_class is None:
preprocessor_config_file = cached_file(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **cached_file_kwargs)
if preprocessor_config_file is not None:
config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
processor_class = config_dict.get('processor_class', None)
if 'AutoProcessor' in config_dict.get('auto_map', {}):
processor_auto_map = config_dict['auto_map']['AutoProcessor']
if preprocessor_config_file is None:
preprocessor_config_file = cached_file(pretrained_model_name_or_path, VIDEO_PROCESSOR_NAME, **cached_file_kwargs)
if preprocessor_config_file is not None:
config_dict, _ = BaseVideoProcessor.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)
processor_class = config_dict.get('processor_class', None)
if 'AutoProcessor' in config_dict.get('auto_map', {}):
processor_auto_map = config_dict['auto_map']['AutoProcessor']
if preprocessor_config_file is None:
preprocessor_config_file = cached_file(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **cached_file_kwargs)
if preprocessor_config_file is not None and processor_class is None:
config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
processor_class = config_dict.get('processor_class', None)
if 'AutoProcessor' in config_dict.get('auto_map', {}):
processor_auto_map = config_dict['auto_map']['AutoProcessor']
if processor_class is None:
tokenizer_config_file = cached_file(pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **cached_file_kwargs)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding='utf-8') as reader:
config_dict = json.load(reader)
processor_class = config_dict.get('processor_class', None)
if 'AutoProcessor' in config_dict.get('auto_map', {}):
processor_auto_map = config_dict['auto_map']['AutoProcessor']
if processor_class is None:
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
processor_class = getattr(config, 'processor_class', None)
if hasattr(config, 'auto_map') and 'AutoProcessor' in config.auto_map:
processor_auto_map = config.auto_map['AutoProcessor']
if processor_class is not None:
processor_class = processor_class_from_name(processor_class)
has_remote_code = processor_auto_map is not None
has_local_code = processor_class is not None or type(config) in PROCESSOR_MAPPING
if has_remote_code:
if '--' in processor_auto_map:
upstream_repo = processor_auto_map.split('--')[0]
else:
upstream_repo = None
trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo)
if has_remote_code and trust_remote_code:
processor_class = get_class_from_dynamic_module(processor_auto_map, pretrained_model_name_or_path, **kwargs)
_ = kwargs.pop('code_revision', None)
processor_class.register_for_auto_class()
return processor_class.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
elif processor_class is not None:
return processor_class.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
elif type(config) in PROCESSOR_MAPPING:
return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs)
try:
return AutoTokenizer.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
except Exception:
try:
return AutoImageProcessor.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
except Exception:
pass
try:
return AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
except Exception:
pass
raise ValueError(f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains the files of at least one of those processing classes.")
@staticmethod
def register(config_class, processor_class, exist_ok=False):
"""
Register a new processor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
processor_class ([`ProcessorMixin`]): The processor to register.
"""
PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok)
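For checkpoints whose processor class cannot be resolved by the lookups in `from_pretrained`, a custom processor can be wired in explicitly via `register`. A hypothetical sketch; `MyConfig` and `MyProcessor` are illustrative names:

```python
from transformers import AutoProcessor, PretrainedConfig
from transformers.processing_utils import ProcessorMixin

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type

class MyProcessor(ProcessorMixin):
    attributes = ["tokenizer"]         # sub-components this processor wraps
    tokenizer_class = "AutoTokenizer"

AutoProcessor.register(MyConfig, MyProcessor)
# From here on, AutoProcessor.from_pretrained resolves MyConfig checkpoints to MyProcessor.
```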
|
class AutoProcessor:
'''
This is a generic processor class that will be instantiated as one of the processor classes of the library when
created with the [`AutoProcessor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
'''
def __init__(self):
pass
@classmethod
@replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
'''
Instantiate one of the processor classes of the library from a pretrained model vocabulary.
The processor class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible):
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing processor files saved using the `save_pretrained()` method,
e.g., `./my_model_directory/`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the feature extractor files and override the cached versions
if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final feature extractor object. If `True`, then this
function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
`kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoProcessor
>>> # Download processor from huggingface.co and cache.
>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
>>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*)
>>> # processor = AutoProcessor.from_pretrained("./test/saved_model/")
```'''
pass
@staticmethod
def register(config_class, processor_class, exist_ok=False):
'''
Register a new processor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
processor_class ([`ProcessorMixin`]): The processor to register.
'''
pass
| 7
| 3
| 72
| 9
| 38
| 24
| 9
| 0.66
| 0
| 11
| 7
| 0
| 1
| 0
| 3
| 3
| 228
| 32
| 118
| 19
| 111
| 78
| 75
| 16
| 71
| 24
| 0
| 3
| 26
|
666
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/auto/tokenization_auto.py
|
transformers.models.auto.tokenization_auto.AutoTokenizer
|
from ...tokenization_utils import PreTrainedTokenizer
from ..encoder_decoder import EncoderDecoderConfig
import warnings
from ...configuration_utils import PretrainedConfig
from .configuration_auto import CONFIG_MAPPING_NAMES, AutoConfig, config_class_to_model_type, model_type_to_module_name, replace_list_option_in_docstrings
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...utils import cached_file, extract_commit_hash, is_g2p_en_available, is_sentencepiece_available, is_tokenizers_available, logging
from ...modeling_gguf_pytorch_utils import load_gguf_checkpoint
class AutoTokenizer:
"""
This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
created with the [`AutoTokenizer.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise OSError('AutoTokenizer is designed to be instantiated using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
"""
Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
- A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
- A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not
applicable to all derived classes)
inputs (additional positional arguments, *optional*):
Will be passed along to the Tokenizer `__init__()` method.
config ([`PretrainedConfig`], *optional*)
The configuration object used to determine the tokenizer class to instantiate.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files and override the
cached versions if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
subfolder (`str`, *optional*):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
facebook/rag-token-base), specify it here.
use_fast (`bool`, *optional*, defaults to `True`):
Use a [fast Rust-based tokenizer](https://huggingface.co/docs/tokenizers/index) if it is supported for
a given model. If a fast tokenizer is not available for a given model, a normal Python-based tokenizer
is returned instead.
tokenizer_type (`str`, *optional*):
Tokenizer type to be loaded.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like
`bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
`additional_special_tokens`. See parameters in the `__init__()` for more details.
Examples:
```python
>>> from transformers import AutoTokenizer
>>> # Download vocabulary from huggingface.co and cache.
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
>>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
>>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
>>> # tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/")
>>> # Download vocabulary from huggingface.co and define model-specific arguments
>>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base", add_prefix_space=True)
```"""
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if kwargs.get('token') is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
kwargs['token'] = use_auth_token
config = kwargs.pop('config', None)
kwargs['_from_auto'] = True
use_fast = kwargs.pop('use_fast', True)
tokenizer_type = kwargs.pop('tokenizer_type', None)
trust_remote_code = kwargs.pop('trust_remote_code', None)
gguf_file = kwargs.get('gguf_file')
if tokenizer_type is not None:
tokenizer_class = None
tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None)
if tokenizer_class_tuple is None:
raise ValueError(f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of {', '.join((c for c in TOKENIZER_MAPPING_NAMES))}.")
tokenizer_class_name, tokenizer_fast_class_name = tokenizer_class_tuple
if use_fast:
if tokenizer_fast_class_name is not None:
tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name)
else:
logger.warning('`use_fast` is set to `True` but the tokenizer class does not have a fast version. Falling back to the slow version.')
if tokenizer_class is None:
tokenizer_class = tokenizer_class_from_name(tokenizer_class_name)
if tokenizer_class is None:
raise ValueError(f'Tokenizer class {tokenizer_class_name} is not currently imported.')
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
if '_commit_hash' in tokenizer_config:
kwargs['_commit_hash'] = tokenizer_config['_commit_hash']
config_tokenizer_class = tokenizer_config.get('tokenizer_class')
tokenizer_auto_map = None
if 'auto_map' in tokenizer_config:
if isinstance(tokenizer_config['auto_map'], (tuple, list)):
tokenizer_auto_map = tokenizer_config['auto_map']
else:
tokenizer_auto_map = tokenizer_config['auto_map'].get('AutoTokenizer', None)
if config_tokenizer_class is None:
if not isinstance(config, PretrainedConfig):
if gguf_file:
gguf_path = cached_file(pretrained_model_name_or_path, gguf_file, **kwargs)
config_dict = load_gguf_checkpoint(gguf_path, return_tensors=False)['config']
config = AutoConfig.for_model(**config_dict)
else:
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
config_tokenizer_class = config.tokenizer_class
if hasattr(config, 'auto_map') and 'AutoTokenizer' in config.auto_map:
tokenizer_auto_map = config.auto_map['AutoTokenizer']
has_remote_code = tokenizer_auto_map is not None
has_local_code = type(config) in TOKENIZER_MAPPING or (config_tokenizer_class is not None and (tokenizer_class_from_name(config_tokenizer_class) is not None or tokenizer_class_from_name(config_tokenizer_class + 'Fast') is not None))
if has_remote_code:
if use_fast and tokenizer_auto_map[1] is not None:
class_ref = tokenizer_auto_map[1]
else:
class_ref = tokenizer_auto_map[0]
if '--' in class_ref:
upstream_repo = class_ref.split('--')[0]
else:
upstream_repo = None
trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo)
if has_remote_code and trust_remote_code:
tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
_ = kwargs.pop('code_revision', None)
tokenizer_class.register_for_auto_class()
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, trust_remote_code=trust_remote_code, **kwargs)
elif config_tokenizer_class is not None:
tokenizer_class = None
if use_fast and (not config_tokenizer_class.endswith('Fast')):
tokenizer_class_candidate = f'{config_tokenizer_class}Fast'
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if tokenizer_class is None:
tokenizer_class_candidate = config_tokenizer_class
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if tokenizer_class is None:
raise ValueError(f'Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported.')
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
if isinstance(config, EncoderDecoderConfig):
if type(config.decoder) is not type(config.encoder):
logger.warning(f'The encoder model config class: {config.encoder.__class__} is different from the decoder model config class: {config.decoder.__class__}. It is not recommended to use the `AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder specific tokenizer classes.')
config = config.encoder
model_type = config_class_to_model_type(type(config).__name__)
if model_type is not None:
tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)]
if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif tokenizer_class_py is not None:
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError('This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed in order to use this tokenizer.')
raise ValueError(f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\nModel type should be one of {', '.join((c.__name__ for c in TOKENIZER_MAPPING))}.")
@staticmethod
def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False):
"""
Register a new tokenizer in this mapping.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
slow_tokenizer_class ([`PretrainedTokenizer`], *optional*):
The slow tokenizer to register.
fast_tokenizer_class ([`PretrainedTokenizerFast`], *optional*):
The fast tokenizer to register.
"""
if slow_tokenizer_class is None and fast_tokenizer_class is None:
raise ValueError('You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class`.')
if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast):
raise ValueError('You passed a fast tokenizer in the `slow_tokenizer_class`.')
if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer):
raise ValueError('You passed a slow tokenizer in the `fast_tokenizer_class`.')
if slow_tokenizer_class is not None and fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast) and (fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class):
raise ValueError(f'The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not consistent with the slow tokenizer class you passed (fast tokenizer has {fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}). Fix one of those so they match!')
if config_class in TOKENIZER_MAPPING._extra_content:
existing_slow, existing_fast = TOKENIZER_MAPPING[config_class]
if slow_tokenizer_class is None:
slow_tokenizer_class = existing_slow
if fast_tokenizer_class is None:
fast_tokenizer_class = existing_fast
TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok)
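The docstring above already illustrates `from_pretrained`; the `register` path has no example, so here is a minimal sketch of wiring a custom config/tokenizer pair into the auto classes. `MyConfig` and `MySlowTokenizer` are hypothetical placeholder names, not part of the library.

```python
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer


class MyConfig(PretrainedConfig):
    # Hypothetical model type; a real config would also define model hyperparameters.
    model_type = "my-model"


class MySlowTokenizer(PreTrainedTokenizer):
    # A real tokenizer would implement vocabulary handling; omitted for brevity.
    pass


# Make the config type discoverable, then map it to the slow tokenizer class.
AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MySlowTokenizer)

# From here on, AutoTokenizer.from_pretrained() on a checkpoint whose config
# resolves to MyConfig will instantiate MySlowTokenizer.
```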
|
class AutoTokenizer:
'''
This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
created with the [`AutoTokenizer.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
'''
def __init__(self):
pass
@classmethod
@replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
'''
Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
- A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
- A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not
applicable to all derived classes)
inputs (additional positional arguments, *optional*):
Will be passed along to the Tokenizer `__init__()` method.
config ([`PretrainedConfig`], *optional*):
The configuration object used to determine the tokenizer class to instantiate.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files and override the
cached versions if they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
subfolder (`str`, *optional*):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
facebook/rag-token-base), specify it here.
use_fast (`bool`, *optional*, defaults to `True`):
Use a [fast Rust-based tokenizer](https://huggingface.co/docs/tokenizers/index) if it is supported for
a given model. If a fast tokenizer is not available for a given model, a normal Python-based tokenizer
is returned instead.
tokenizer_type (`str`, *optional*):
Tokenizer type to be loaded.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (additional keyword arguments, *optional*):
Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like
`bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
`additional_special_tokens`. See parameters in the `__init__()` for more details.
Examples:
```python
>>> from transformers import AutoTokenizer
>>> # Download vocabulary from huggingface.co and cache.
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
>>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
>>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
>>> # tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/")
>>> # Download vocabulary from huggingface.co and define model-specific arguments
>>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base", add_prefix_space=True)
```'''
pass
@staticmethod
def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False):
'''
Register a new tokenizer in this mapping.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
slow_tokenizer_class ([`PretrainedTokenizer`], *optional*):
The slow tokenizer to register.
fast_tokenizer_class ([`PretrainedTokenizerFast`], *optional*):
The fast tokenizer to register.
'''
pass
| 7 | 3 | 89 | 10 | 52 | 27 | 12 | 0.55 | 0 | 9 | 4 | 0 | 2 | 0 | 3 | 3 | 280 | 35 | 159 | 27 | 153 | 87 | 101 | 26 | 97 | 28 | 0 | 3 | 37
| 667 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/configuration_autoformer.py | transformers.models.autoformer.configuration_autoformer.AutoformerConfig |
from ...configuration_utils import PretrainedConfig
from typing import Optional
class AutoformerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate an
Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Autoformer
[huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
prediction_length (`int`):
The prediction length for the decoder. In other words, the prediction horizon of the model.
context_length (`int`, *optional*, defaults to `prediction_length`):
The context length for the encoder. If unset, the context length will be the same as the
`prediction_length`.
distribution_output (`string`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
loss (`string`, *optional*, defaults to `"nll"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood (nll) - which currently is the only supported one.
input_size (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4,
5, 6, 7]`.
scaling (`bool`, *optional*, defaults to `True`):
Whether to scale the input targets.
num_time_features (`int`, *optional*, defaults to 0):
The number of time features in the input time series.
num_dynamic_real_features (`int`, *optional*, defaults to 0):
The number of dynamic real valued features.
num_static_categorical_features (`int`, *optional*, defaults to 0):
The number of static categorical features.
num_static_real_features (`int`, *optional*, defaults to 0):
The number of static real valued features.
cardinality (`list[int]`, *optional*):
The cardinality (number of different values) for each of the static categorical features. Should be a list
of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
embedding_dimension (`list[int]`, *optional*):
The dimension of the embedding for each of the static categorical features. Should be a list of integers,
having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
d_model (`int`, *optional*, defaults to 64):
Dimensionality of the transformer layers.
encoder_layers (`int`, *optional*, defaults to 2):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 2):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
`"relu"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the encoder, and decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each encoder layer.
decoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each decoder layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability used between the two layers of the feed-forward networks.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples to generate in parallel for each time step of inference.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
label_length (`int`, *optional*, defaults to 10):
Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
non-autoregressive generation).
moving_average (`int`, *optional*, defaults to 25):
The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
Layer.
autocorrelation_factor (`int`, *optional*, defaults to 3):
"Attention" (i.e. AutoCorrelation mechanism) factor which is used to find top k autocorrelations delays.
It's recommended in the paper to set it to a number between 1 and 5.
Example:
```python
>>> from transformers import AutoformerConfig, AutoformerModel
>>> # Initializing a default Autoformer configuration
>>> configuration = AutoformerConfig()
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = AutoformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'autoformer'
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers'}
def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: list[int]=[1, 2, 3, 4, 5, 6, 7], scaling: bool=True, num_time_features: int=0, num_dynamic_real_features: int=0, num_static_categorical_features: int=0, num_static_real_features: int=0, cardinality: Optional[list[int]]=None, embedding_dimension: Optional[list[int]]=None, d_model: int=64, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, activation_function: str='gelu', dropout: float=0.1, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache: bool=True, is_encoder_decoder=True, label_length: int=10, moving_average: int=25, autocorrelation_factor: int=3, **kwargs):
self.prediction_length = prediction_length
self.context_length = context_length if context_length is not None else prediction_length
self.distribution_output = distribution_output
self.loss = loss
self.input_size = input_size
self.num_time_features = num_time_features
self.lags_sequence = lags_sequence
self.scaling = scaling
self.num_dynamic_real_features = num_dynamic_real_features
self.num_static_real_features = num_static_real_features
self.num_static_categorical_features = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(cardinality) != num_static_categorical_features:
raise ValueError('The cardinality should be a list of the same length as `num_static_categorical_features`')
self.cardinality = cardinality
else:
self.cardinality = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(embedding_dimension) != num_static_categorical_features:
raise ValueError('The embedding dimension should be a list of the same length as `num_static_categorical_features`')
self.embedding_dimension = embedding_dimension
else:
self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
self.num_parallel_samples = num_parallel_samples
self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
self.d_model = d_model
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.encoder_ffn_dim = encoder_ffn_dim
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.label_length = label_length
self.moving_average = moving_average
self.autocorrelation_factor = autocorrelation_factor
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def _number_of_features(self) -> int:
return sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2
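As a quick check of the arithmetic in `__init__` and `_number_of_features`, a small sketch with arbitrary example values:

```python
from transformers import AutoformerConfig

config = AutoformerConfig(
    prediction_length=24,
    input_size=1,
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[10],          # one categorical feature taking 10 values
    embedding_dimension=[5],   # embedded into 5 dimensions
)

# feature_size = input_size * len(lags_sequence) + _number_of_features
#              = 1 * 7 + (5 + 0 + 2 + 0 + 1 * 2) = 16
assert config.feature_size == 16
```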
|
class AutoformerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate an
Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Autoformer
[huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
prediction_length (`int`):
The prediction length for the decoder. In other words, the prediction horizon of the model.
context_length (`int`, *optional*, defaults to `prediction_length`):
The context length for the encoder. If unset, the context length will be the same as the
`prediction_length`.
distribution_output (`string`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
loss (`string`, *optional*, defaults to `"nll"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood (nll) - which currently is the only supported one.
input_size (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4,
5, 6, 7]`.
scaling (`bool`, *optional*, defaults to `True`):
Whether to scale the input targets.
num_time_features (`int`, *optional*, defaults to 0):
The number of time features in the input time series.
num_dynamic_real_features (`int`, *optional*, defaults to 0):
The number of dynamic real valued features.
num_static_categorical_features (`int`, *optional*, defaults to 0):
The number of static categorical features.
num_static_real_features (`int`, *optional*, defaults to 0):
The number of static real valued features.
cardinality (`list[int]`, *optional*):
The cardinality (number of different values) for each of the static categorical features. Should be a list
of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
embedding_dimension (`list[int]`, *optional*):
The dimension of the embedding for each of the static categorical features. Should be a list of integers,
having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
d_model (`int`, *optional*, defaults to 64):
Dimensionality of the transformer layers.
encoder_layers (`int`, *optional*, defaults to 2):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 2):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
`"relu"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the encoder, and decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each encoder layer.
decoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each decoder layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability used between the two layers of the feed-forward networks.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples to generate in parallel for each time step of inference.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
label_length (`int`, *optional*, defaults to 10):
Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
non-autoregressive generation).
moving_average (`int`, *optional*, defaults to 25):
The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
Layer.
autocorrelation_factor (`int`, *optional*, defaults to 3):
"Attention" (i.e. AutoCorrelation mechanism) factor which is used to find top k autocorrelations delays.
It's recommended in the paper to set it to a number between 1 and 5.
Example:
```python
>>> from transformers import AutoformerConfig, AutoformerModel
>>> # Initializing a default Autoformer configuration
>>> configuration = AutoformerConfig()
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = AutoformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: list[int]=[1, 2, 3, 4, 5, 6, 7], scaling: bool=True, num_time_features: int=0, num_dynamic_real_features: int=0, num_static_categorical_features: int=0, num_static_real_features: int=0, cardinality: Optional[list[int]]=None, embedding_dimension: Optional[list[int]]=None, d_model: int=64, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, activation_function: str='gelu', dropout: float=0.1, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache: bool=True, is_encoder_decoder=True, label_length: int=10, moving_average: int=25, autocorrelation_factor: int=3, **kwargs):
pass
@property
def _number_of_features(self) -> int:
pass
| 4 | 1 | 52 | 3 | 47 | 3 | 4 | 0.99 | 1 | 6 | 0 | 0 | 2 | 33 | 2 | 2 | 217 | 17 | 101 | 75 | 61 | 100 | 48 | 38 | 45 | 6 | 1 | 2 | 7
| 668 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py | transformers.models.autoformer.modeling_autoformer.AutoFormerDecoderOutput |
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, ModelOutput, SampleTSPredictionOutput, Seq2SeqTSPredictionOutput
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro="\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ")
class AutoFormerDecoderOutput(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Trend tensor for each time series.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
trend: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
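A short usage sketch of the output container above (tensor shapes are arbitrary); like any `ModelOutput`, it supports attribute access and conversion to a tuple of its non-`None` fields:

```python
import torch

from transformers.models.autoformer.modeling_autoformer import AutoFormerDecoderOutput

out = AutoFormerDecoderOutput(
    last_hidden_state=torch.zeros(2, 10, 64),
    trend=torch.zeros(2, 10, 64),
)

assert out.last_hidden_state.shape == (2, 10, 64)
assert len(out.to_tuple()) == 2  # fields left as None are dropped
```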
|
@dataclass
@auto_docstring(custom_intro="\n Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n ")
class AutoFormerDecoderOutput(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Trend tensor for each time series.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 4.57 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 7 | 7 | 7 | 6 | 32 | 7 | 7 | 6 | 0 | 1 | 0 | 0
| 669 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py | transformers.models.autoformer.modeling_autoformer.AutoformerAttention |
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch import nn
import math
import torch
from ...utils.deprecation import deprecate_kwarg
class AutoformerAttention(nn.Module):
"""
AutoCorrelation Mechanism with the following two phases:
(1) period-based dependencies discovery (2) time delay aggregation
This block replaces the canonical self-attention mechanism.
"""
def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, autocorrelation_factor: Optional[int]=3, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.autocorrelation_factor = autocorrelation_factor
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
queries_time_length = query_states.size(1)
values_time_length = value_states.size(1)
if queries_time_length > values_time_length:
query_states = query_states[:, :queries_time_length - values_time_length, :]
zeros = torch.zeros_like(query_states).float()
value_states = torch.cat([value_states, zeros], dim=1)
key_states = torch.cat([key_states, zeros], dim=1)
else:
value_states = value_states[:, :queries_time_length, :]
key_states = key_states[:, :queries_time_length, :]
query_states_fft = torch.fft.rfft(query_states, n=tgt_len, dim=1)
key_states_fft = torch.fft.rfft(key_states, n=tgt_len, dim=1)
attn_weights = query_states_fft * torch.conj(key_states_fft)
attn_weights = torch.fft.irfft(attn_weights, n=tgt_len, dim=1)
src_len = key_states.size(1)
channel = key_states.size(2)
if attn_weights.size() != (bsz * self.num_heads, tgt_len, channel):
raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, channel)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}')
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, channel)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, channel)
if output_attentions:
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, channel)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, channel)
else:
attn_weights_reshaped = None
time_length = value_states.size(1)
autocorrelations = attn_weights.view(bsz, self.num_heads, tgt_len, channel)
top_k = int(self.autocorrelation_factor * math.log(time_length))
autocorrelations_mean_on_head_channel = torch.mean(autocorrelations, dim=(1, -1))
if self.training:
autocorrelations_mean_on_bsz = torch.mean(autocorrelations_mean_on_head_channel, dim=0)
_, top_k_delays_index = torch.topk(autocorrelations_mean_on_bsz, top_k)
top_k_autocorrelations = torch.stack([autocorrelations_mean_on_head_channel[:, top_k_delays_index[i]] for i in range(top_k)], dim=-1)
else:
top_k_autocorrelations, top_k_delays_index = torch.topk(autocorrelations_mean_on_head_channel, top_k, dim=1)
top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1)
if not self.training:
tmp_values = value_states.repeat(1, 2, 1)
init_index = torch.arange(time_length).view(1, -1, 1).repeat(bsz * self.num_heads, 1, channel).to(value_states.device)
delays_agg = torch.zeros_like(value_states).float()
for i in range(top_k):
if not self.training:
tmp_delay = init_index + top_k_delays_index[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel)
value_states_roll_delay = torch.gather(tmp_values, dim=1, index=tmp_delay)
else:
value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays_index[i]), dims=1)
top_k_autocorrelations_at_delay = top_k_autocorrelations[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel)
delays_agg += value_states_roll_delay * top_k_autocorrelations_at_delay
attn_output = delays_agg.contiguous()
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped)
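The period-discovery step in `forward` relies on the fact that the autocorrelation of a sequence equals the inverse FFT of its power spectrum. A standalone sketch of that idea (toy data, single channel; the module additionally handles heads, masking and caching):

```python
import math

import torch

# Toy batch: one sequence of length 64 with a single channel and period 16.
x = torch.sin(torch.arange(64, dtype=torch.float32) * 2 * math.pi / 16).view(1, 64, 1)

# Autocorrelation via FFT along the time dimension, as in the forward pass above.
x_fft = torch.fft.rfft(x, dim=1)
autocorr = torch.fft.irfft(x_fft * torch.conj(x_fft), n=x.size(1), dim=1)

# Keep the top-k delays (k = autocorrelation_factor * log(length)) and soften their weights.
factor = 3
top_k = int(factor * math.log(x.size(1)))
weights, delays = torch.topk(autocorr.mean(dim=(0, -1)), top_k)
weights = torch.softmax(weights, dim=-1)

# Delay 0 dominates trivially; the next strongest delays are multiples of the period 16.
print(delays.tolist())
```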
|
class AutoformerAttention(nn.Module):
'''
AutoCorrelation Mechanism with the following two phases:
(1) period-based dependencies discovery (2) time delay aggregation
This block replaces the canonical self-attention mechanism.
'''
def __init__(self, embed_dim: int, num_heads: int, dropout: Optional[float]=0.0, is_decoder: Optional[bool]=False, bias: Optional[bool]=True, autocorrelation_factor: Optional[int]=3, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4 | 2 | 71 | 9 | 51 | 12 | 7 | 0.27 | 1 | 7 | 0 | 0 | 3 | 11 | 3 | 13 | 221 | 30 | 153 | 60 | 133 | 42 | 99 | 44 | 95 | 17 | 1 | 2 | 20
| 670 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py | transformers.models.autoformer.modeling_autoformer.AutoformerDecoder |
from .configuration_autoformer import AutoformerConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch import nn
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
class AutoformerDecoder(AutoformerPreTrainedModel):
"""
Transformer decoder consisting of `config.decoder_layers` layers. Each layer is a [`AutoformerDecoderLayer`]
Args:
config: AutoformerConfig
"""
def __init__(self, config: AutoformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
if config.prediction_length is None:
raise ValueError('The `prediction_length` config needs to be specified.')
self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
self.embed_positions = AutoformerSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model)
self.layers = nn.ModuleList([AutoformerDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.seasonality_projection = nn.Linear(config.d_model, config.feature_size)
self.gradient_checkpointing = False
self.post_init()
def forward(self, trend: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, AutoFormerDecoderOutput]:
"""
Args:
trend (`torch.FloatTensor` of shape `(batch_size, prediction_length, feature_size)`, *optional*):
The trend sequence to be fed to the decoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If `use_cache` is True, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.gradient_checkpointing and self.training and use_cache:
logger.warning('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
input_shape = inputs_embeds.size()[:-1]
if self.gradient_checkpointing and use_cache:
logger.warning('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
if encoder_hidden_states is not None and encoder_attention_mask is not None:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
hidden_states = self.value_embedding(inputs_embeds)
embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length - self.config.label_length)
hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states, residual_trend = layer_outputs[0]
trend = trend + residual_trend
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.seasonality_projection(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, trend, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return AutoFormerDecoderOutput(last_hidden_state=hidden_states, trend=trend, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
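A minimal sketch of driving the decoder above directly, with random embeddings and arbitrary small sizes; the import path is the internal module this class lives in. It only illustrates the expected shapes of `inputs_embeds`, the initial `trend`, and the two outputs:

```python
import torch

from transformers import AutoformerConfig
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder

config = AutoformerConfig(prediction_length=8, context_length=16)  # label_length defaults to 10
decoder = AutoformerDecoder(config).eval()  # eval() disables layerdrop

batch = 2
seq_len = config.label_length + config.prediction_length          # 10 + 8 = 18
inputs_embeds = torch.randn(batch, seq_len, config.feature_size)  # feature_size = 9 with these defaults
trend_init = torch.zeros(batch, seq_len, config.feature_size)

with torch.no_grad():
    out = decoder(trend=trend_init, inputs_embeds=inputs_embeds, use_cache=False, return_dict=True)

# Seasonal part and accumulated trend are both projected back to feature_size.
print(out.last_hidden_state.shape, out.trend.shape)  # torch.Size([2, 18, 9]) twice
```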
|
class AutoformerDecoder(AutoformerPreTrainedModel):
'''
Transformer decoder consisting of `config.decoder_layers` layers. Each layer is a [`AutoformerDecoderLayer`]
Args:
config: AutoformerConfig
'''
def __init__(self, config: AutoformerConfig):
pass
def forward(self, trend: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, AutoFormerDecoderOutput]:
'''
Args:
trend (`torch.FloatTensor` of shape `(batch_size, prediction_length, feature_size)`, *optional*):
The trend sequence to be fed to the decoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If `use_cache` is True, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3 | 2 | 106 | 14 | 62 | 31 | 17 | 0.53 | 1 | 13 | 5 | 0 | 2 | 8 | 2 | 3 | 221 | 30 | 125 | 39 | 108 | 66 | 63 | 25 | 60 | 31 | 2 | 3 | 33
| 671 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py | transformers.models.autoformer.modeling_autoformer.AutoformerDecoderLayer |
from .configuration_autoformer import AutoformerConfig
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
import torch
class AutoformerDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: AutoformerConfig, layer_idx=None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = AutoformerAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, autocorrelation_factor=config.autocorrelation_factor, layer_idx=layer_idx)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = AutoformerAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, autocorrelation_factor=config.autocorrelation_factor, layer_idx=layer_idx)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = AutoformerLayernorm(config)
self.decomp1 = AutoformerSeriesDecompositionLayer(config)
self.decomp2 = AutoformerSeriesDecompositionLayer(config)
self.decomp3 = AutoformerSeriesDecompositionLayer(config)
self.trend_projection = nn.Conv1d(in_channels=self.embed_dim, out_channels=config.feature_size, kernel_size=3, stride=1, padding=1, padding_mode='circular', bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the `present_key_value` state to be used for subsequent
decoding.
"""
residual = hidden_states
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states, trend1 = self.decomp1(hidden_states)
hidden_states = self.self_attn_layer_norm(hidden_states)
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states, trend2 = self.decomp2(hidden_states)
hidden_states = self.encoder_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states, trend3 = self.decomp3(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
if encoder_hidden_states is not None:
residual_trend = trend1 + trend2 + trend3
else:
residual_trend = trend1 + trend3
residual_trend = self.trend_projection(residual_trend.permute(0, 2, 1)).transpose(1, 2)
outputs = ((hidden_states, residual_trend),)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
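# Hedged sketch (not from the original file): the series-decomposition layers used above split a
# sequence into a trend part (moving average) and a seasonal part (residual). A simplified,
# self-contained version of that idea; the kernel size and tensor sizes are assumptions.
import torch
from torch import nn

def moving_average_decompose(x: torch.Tensor, kernel_size: int = 25):
    # x: (batch, seq_len, channels); pad the ends so the trend keeps the original length
    pad = (kernel_size - 1) // 2
    front = x[:, :1, :].repeat(1, pad, 1)
    back = x[:, -1:, :].repeat(1, kernel_size - 1 - pad, 1)
    padded = torch.cat([front, x, back], dim=1)
    trend = nn.functional.avg_pool1d(padded.permute(0, 2, 1), kernel_size, stride=1).permute(0, 2, 1)
    return x - trend, trend  # (seasonal, trend)

seasonal, trend = moving_average_decompose(torch.randn(2, 48, 8))
print(seasonal.shape, trend.shape)  # torch.Size([2, 48, 8]) torch.Size([2, 48, 8])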
|
class AutoformerDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: AutoformerConfig, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the `present_key_value` state to be used for subsequent
decoding.
'''
pass
| 4
| 1
| 72
| 7
| 50
| 16
| 4
| 0.31
| 1
| 7
| 4
| 0
| 2
| 15
| 2
| 12
| 145
| 14
| 100
| 40
| 86
| 31
| 55
| 29
| 52
| 7
| 1
| 1
| 8
|
672
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerEncoder
|
from torch import nn
import torch
from .configuration_autoformer import AutoformerConfig
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, ModelOutput, SampleTSPredictionOutput, Seq2SeqTSPredictionOutput
class AutoformerEncoder(AutoformerPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`AutoformerEncoderLayer`].
Args:
config: AutoformerConfig
"""
def __init__(self, config: AutoformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
if config.prediction_length is None:
raise ValueError('The `prediction_length` config needs to be specified.')
self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
self.embed_positions = AutoformerSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model)
self.layers = nn.ModuleList([AutoformerEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
"""
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.value_embedding(inputs_embeds)
embed_pos = self.embed_positions(inputs_embeds.size())
hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
attention_mask = self._update_full_mask(attention_mask, inputs_embeds)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
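# Hedged sketch (illustration only, not part of the original file): the LayerDrop decision made
# inside the loop above. During training a uniform draw per layer decides whether the layer is
# skipped; the layerdrop value and layer count here are assumptions.
import torch

layerdrop = 0.1
torch.manual_seed(0)
for idx in range(4):
    dropout_probability = torch.rand([])
    print(f"layer {idx}: skipped={bool(dropout_probability < layerdrop)}")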
|
class AutoformerEncoder(AutoformerPreTrainedModel):
'''
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`AutoformerEncoderLayer`].
Args:
config: AutoformerConfig
'''
def __init__(self, config: AutoformerConfig):
pass
def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
'''
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3
| 2
| 63
| 9
| 39
| 15
| 11
| 0.46
| 1
| 12
| 5
| 0
| 2
| 7
| 2
| 3
| 135
| 21
| 79
| 26
| 68
| 36
| 49
| 18
| 46
| 20
| 2
| 3
| 22
|
673
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerEncoderLayer
|
from .configuration_autoformer import AutoformerConfig
from typing import Optional, Union
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
import torch
class AutoformerEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: AutoformerConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = AutoformerAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, autocorrelation_factor=config.autocorrelation_factor)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = AutoformerLayernorm(config)
self.decomp1 = AutoformerSeriesDecompositionLayer(config)
self.decomp2 = AutoformerSeriesDecompositionLayer(config)
def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, _ = self.decomp1(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states, _ = self.decomp2(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
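# Hedged sketch (illustration only): the float16 clamp applied above, shown on values that
# overflow the fp16 range. Clamping replaces +/-inf with a finite value near torch.finfo(...).max,
# which prevents inf/nan from propagating through later layers.
import torch

hidden_states = torch.tensor([70000.0, -70000.0]).to(torch.float16)  # overflows to +/-inf in fp16
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
print(torch.clamp(hidden_states, min=-clamp_value, max=clamp_value))  # finite values again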
|
class AutoformerEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: AutoformerConfig):
pass
def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 35
| 3
| 27
| 6
| 2
| 0.22
| 1
| 6
| 4
| 0
| 2
| 11
| 2
| 12
| 72
| 6
| 54
| 24
| 45
| 12
| 36
| 18
| 33
| 3
| 1
| 1
| 4
|
674
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerFeatureEmbedder
|
import torch
from torch import nn
class AutoformerFeatureEmbedder(nn.Module):
"""
Embed a sequence of categorical features.
Args:
cardinalities (`list[int]`):
List of cardinalities of the categorical features.
embedding_dims (`list[int]`):
List of embedding dimensions of the categorical features.
"""
def __init__(self, cardinalities: list[int], embedding_dims: list[int]) -> None:
super().__init__()
self.num_features = len(cardinalities)
self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.num_features > 1:
cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
else:
cat_feature_slices = [features]
return torch.cat([embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)], dim=-1)
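# Hedged usage sketch (not part of the original file; assumes the AutoformerFeatureEmbedder class
# defined above is in scope). Each categorical column gets its own embedding table and the results
# are concatenated, so the output width equals sum(embedding_dims). The cardinalities, embedding
# dimensions and batch shape below are arbitrary illustrative values.
import torch

embedder = AutoformerFeatureEmbedder(cardinalities=[4, 7], embedding_dims=[2, 3])
features = torch.randint(0, 4, (8, 10, 2))       # (batch, seq_len, num_categorical_features)
features[..., 1] = torch.randint(0, 7, (8, 10))  # second column drawn from its own cardinality
print(embedder(features).shape)                  # torch.Size([8, 10, 5]) == 2 + 3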
|
class AutoformerFeatureEmbedder(nn.Module):
'''
Embed a sequence of categorical features.
Args:
cardinalities (`list[int]`):
List of cardinalities of the categorical features.
embedding_dims (`list[int]`):
List of embedding dimensions of the categorical features.
'''
def __init__(self, cardinalities: list[int], embedding_dims: list[int]) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 10
| 1
| 8
| 1
| 2
| 0.59
| 1
| 4
| 0
| 0
| 2
| 2
| 2
| 12
| 32
| 5
| 17
| 6
| 14
| 10
| 10
| 6
| 7
| 2
| 1
| 1
| 3
|
675
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerForPrediction
|
from .configuration_autoformer import AutoformerConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, ModelOutput, SampleTSPredictionOutput, Seq2SeqTSPredictionOutput
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
import torch
@auto_docstring
class AutoformerForPrediction(AutoformerPreTrainedModel):
def __init__(self, config: AutoformerConfig):
super().__init__(config)
self.model = AutoformerModel(config)
if config.distribution_output == 'student_t':
self.distribution_output = StudentTOutput(dim=config.input_size)
elif config.distribution_output == 'normal':
self.distribution_output = NormalOutput(dim=config.input_size)
elif config.distribution_output == 'negative_binomial':
self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
else:
raise ValueError(f'Unknown distribution output {config.distribution_output}')
self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.feature_size)
self.target_shape = self.distribution_output.event_shape
if config.loss == 'nll':
self.loss = nll
else:
raise ValueError(f'Unknown loss function {config.loss}')
self.post_init()
def output_params(self, decoder_output):
return self.parameter_projection(decoder_output[:, -self.config.prediction_length:, :])
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
@torch.jit.ignore
def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
sliced_params = params
if trailing_n is not None:
sliced_params = [p[:, -trailing_n:] for p in params]
return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Seq2SeqTSPredictionOutput, tuple]:
"""
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Past values of the time series, that serve as context in order to predict the future. These values may
contain lags, i.e. additional values from the past which are added in order to serve as "extra context".
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features`).
The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.
Missing values need to be replaced with zeros.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):
Optional time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires additional time features to be provided.
The Autoformer only learns additional embeddings for `static_categorical_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs to learn to output, given the `past_values`.
See the demo notebook and code snippets for details.
Missing values need to be replaced with zeros.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):
Optional time features, which the model internally will add to `future_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires additional features to be provided.
The Autoformer only learns additional embeddings for `static_categorical_features`.
future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
This mask is used to filter out missing values for the final loss calculation.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import AutoformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values
>>> # as well as possible additional features
>>> # the model autoregressively generates future values
>>> outputs = model.generate(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... future_time_features=batch["future_time_features"],
... )
>>> mean_prediction = outputs.sequences.mean(dim=1)
```
<Tip>
The AutoformerForPrediction can also use static_real_features. To do so, set num_static_real_features in
AutoformerConfig based on number of such features in the dataset (in case of tourism_monthly dataset it
is equal to 1), initialize the model and call as shown below:
```
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import AutoformerConfig, AutoformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # check number of static real features
>>> num_static_real_features = batch["static_real_features"].shape[-1]
>>> # load configuration of pretrained model and override num_static_real_features
>>> configuration = AutoformerConfig.from_pretrained(
... "huggingface/autoformer-tourism-monthly",
... num_static_real_features=num_static_real_features,
... )
>>> # we also need to update feature_size as it is not recalculated
>>> configuration.feature_size += num_static_real_features
>>> model = AutoformerForPrediction(configuration)
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
```
</Tip>
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if future_values is not None:
use_cache = False
outputs = self.model(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict)
prediction_loss = None
params = None
if future_values is not None:
params = self.output_params(outputs[0] + outputs[1])
distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
loss = self.loss(distribution, future_values)
if future_observed_mask is None:
future_observed_mask = torch.ones_like(future_values)
if len(self.target_shape) == 0:
loss_weights = future_observed_mask
else:
loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
prediction_loss = weighted_average(loss, weights=loss_weights)
if not return_dict:
outputs = (params,) + outputs[2:] if params is not None else outputs[2:]
return (prediction_loss,) + outputs if prediction_loss is not None else outputs
return Seq2SeqTSPredictionOutput(loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features)
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> SampleTSPredictionOutput:
"""
Greedily generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size
of this tensor must be larger than the `context_length` of the model, since the model will use the
larger size to construct lag features, i.e. additional values from the past which are added in order to
serve as "extra context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
of the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features,
such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
of variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things
like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
These could also be so-called "age" features, which basically help the model know "at which point in
life" a time-series is. Age features have small values for distant past time steps and increase
monotonically the more we approach the current time step. Holiday features are also a good example of
time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires additional time features to be provided. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to sampled
predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
(for instance as Fourier features). These could also be so-called "age" features, which basically help
the model know "at which point in life" a time-series is. Age features have small values for distant
past time steps and increase monotonically the more we approach the current time step. Holiday features
are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires additional time features to be provided. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to
the values of the time series.
Static categorical features are features which have the same value for all time steps (static over
time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
Return:
[`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
multivariate predictions.
"""
outputs = self(static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=None, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=False)
decoder = self.model.get_decoder()
enc_last_hidden = outputs.encoder_last_hidden_state
loc = outputs.loc
scale = outputs.scale
static_feat = outputs.static_features
num_parallel_samples = self.config.num_parallel_samples
repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_past_values = (past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc) / repeated_scale
time_features = torch.cat((past_time_features, future_time_features), dim=1)
expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_features.shape[1], -1)
features = torch.cat((expanded_static_feat, time_features), dim=-1)
repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
lagged_sequence = self.model.get_lagged_subsequences(sequence=repeated_past_values, subsequences_length=self.config.context_length)
lags_shape = lagged_sequence.shape
reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
seasonal_input, trend_input = self.model.decomposition_layer(reshaped_lagged_sequence)
mean = torch.mean(reshaped_lagged_sequence, dim=1).unsqueeze(1).repeat(1, self.config.prediction_length, 1)
zeros = torch.zeros([reshaped_lagged_sequence.shape[0], self.config.prediction_length, reshaped_lagged_sequence.shape[2]], device=reshaped_lagged_sequence.device)
decoder_input = torch.cat((torch.cat((seasonal_input[:, -self.config.label_length:, ...], zeros), dim=1), repeated_features[:, -self.config.prediction_length - self.config.label_length:, ...]), dim=-1)
trend_init = torch.cat((torch.cat((trend_input[:, -self.config.label_length:, ...], mean), dim=1), repeated_features[:, -self.config.prediction_length - self.config.label_length:, ...]), dim=-1)
decoder_outputs = decoder(trend=trend_init, inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden)
decoder_last_hidden = decoder_outputs.last_hidden_state
trend = decoder_outputs.trend
params = self.output_params(decoder_last_hidden + trend)
distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
future_samples = distr.sample()
return SampleTSPredictionOutput(sequences=future_samples.reshape((-1, num_parallel_samples, self.config.prediction_length) + self.target_shape))
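# Hedged sketch (illustration only, not part of the model code): the parallel-sample layout built
# with repeat_interleave above. Copies of each series sit next to each other along dim 0, so the
# final reshape to (batch, num_parallel_samples, prediction_length) groups samples per series.
# The sizes used here are assumptions.
import torch

num_parallel_samples = 3
past_values = torch.arange(2 * 4, dtype=torch.float).reshape(2, 4)             # (batch=2, seq_len=4)
repeated = past_values.repeat_interleave(repeats=num_parallel_samples, dim=0)  # rows 0-2 -> series 0
print(repeated.shape)                                                          # torch.Size([6, 4])
samples = torch.randn(6, 5)                                                    # (batch * samples, prediction_length)
print(samples.reshape(-1, num_parallel_samples, 5).shape)                      # torch.Size([2, 3, 5])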
|
@auto_docstring
class AutoformerForPrediction(AutoformerPreTrainedModel):
def __init__(self, config: AutoformerConfig):
pass
def output_params(self, decoder_output):
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
@torch.jit.ignore
def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Seq2SeqTSPredictionOutput, tuple]:
'''
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Past values of the time series, that serve as context in order to predict the future. These values may
contain lags, i.e. additional values from the past which are added in order to serve as "extra context".
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features`).
The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.
Missing values need to be replaced with zeros.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):
Optional time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires additional time features to be provided.
The Autoformer only learns additional embeddings for `static_categorical_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs to learn to output, given the `past_values`.
See the demo notebook and code snippets for details.
Missing values need to be replaced with zeros.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):
Optional time features, which the model internally will add to `future_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires additional features to be provided.
The Autoformer only learns additional embeddings for `static_categorical_features`.
future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
This mask is used to filter out missing values for the final loss calculation.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import AutoformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values
>>> # as well as possible additional features
>>> # the model autoregressively generates future values
>>> outputs = model.generate(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... future_time_features=batch["future_time_features"],
... )
>>> mean_prediction = outputs.sequences.mean(dim=1)
```
<Tip>
The AutoformerForPrediction can also use static_real_features. To do so, set num_static_real_features in
AutoformerConfig based on number of such features in the dataset (in case of tourism_monthly dataset it
is equal to 1), initialize the model and call as shown below:
```
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import AutoformerConfig, AutoformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # check number of static real features
>>> num_static_real_features = batch["static_real_features"].shape[-1]
>>> # load configuration of pretrained model and override num_static_real_features
>>> configuration = AutoformerConfig.from_pretrained(
... "huggingface/autoformer-tourism-monthly",
... num_static_real_features=num_static_real_features,
... )
>>> # we also need to update feature_size as it is not recalculated
>>> configuration.feature_size += num_static_real_features
>>> model = AutoformerForPrediction(configuration)
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
```
</Tip>
'''
pass
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> SampleTSPredictionOutput:
'''
Greedily generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size
of this tensor must be larger than the `context_length` of the model, since the model will use the
larger size to construct lag features, i.e. additional values from the past which are added in order to
serve as "extra context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
of the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features,
such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
of variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things
like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
These could also be so-called "age" features, which basically help the model know "at which point in
life" a time-series is. Age features have small values for distant past time steps and increase
monotonically the more we approach the current time step. Holiday features are also a good example of
time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires additional time features to be provided. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to sampled
predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
(for instance as Fourier features). These could also be so-called "age" features, which basically help
the model know "at which point in life" a time-series is. Age features have small values for distant
past time steps and increase monotonically the more we approach the current time step. Holiday features
are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires additional time features to be provided. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to
the values of the time series.
Static categorical features are features which have the same value for all time steps (static over
time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
Return:
[`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
multivariate predictions.
'''
pass
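# Illustrative sketch (not from the original file): one possible way to build the
# calendar-plus-"age" time features described in the docstring above. The pandas
# frequency, the scaling choices, and the toy series length are assumptions.
import numpy as np
import pandas as pd

index = pd.period_range("2020-01", periods=12, freq="M")
month_of_year = (index.month - 1) / 11.0 - 0.5                    # calendar feature in [-0.5, 0.5]
age = np.log1p(np.arange(len(index))) / np.log1p(len(index))      # grows monotonically toward "now"
past_time_features = np.stack([month_of_year, age], axis=-1)      # (sequence_length, num_features)
print(past_time_features.shape)                                    # (12, 2)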
| 12
| 2
| 54
| 8
| 26
| 20
| 3
| 0.77
| 1
| 12
| 7
| 0
| 7
| 5
| 7
| 8
| 390
| 63
| 185
| 83
| 143
| 142
| 78
| 50
| 70
| 9
| 2
| 2
| 20
|
676
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerLayernorm
|
from .configuration_autoformer import AutoformerConfig
import torch
from torch import nn
class AutoformerLayernorm(nn.Module):
"""
Specially designed layer normalization for the seasonal part, calculated as: AutoformerLayernorm(x) = nn.LayerNorm(x)
- torch.mean(nn.LayerNorm(x))
"""
def __init__(self, config: AutoformerConfig):
super().__init__()
self.layernorm = nn.LayerNorm(config.d_model)
def forward(self, x):
x_hat = self.layernorm(x)
bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)
return x_hat - bias
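# Illustrative sketch (not from the original file): reproduces the AutoformerLayernorm
# computation with plain torch so the "re-center over the time axis" step is easy to
# verify. The toy sizes (batch=2, time=5, d_model=4) are assumptions.
import torch
from torch import nn

layernorm = nn.LayerNorm(4)
x = torch.randn(2, 5, 4)                                # (batch, time, d_model)
x_hat = layernorm(x)
bias = x_hat.mean(dim=1, keepdim=True)                  # per-series mean over the time axis
seasonal = x_hat - bias                                 # same result as AutoformerLayernorm.forward
print(seasonal.mean(dim=1).abs().max())                 # ~0: each series is zero-mean over time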
|
class AutoformerLayernorm(nn.Module):
'''
Specially designed layer normalization for the seasonal part, calculated as: AutoformerLayernorm(x) = nn.LayerNorm(x)
- torch.mean(nn.LayerNorm(x))
'''
def __init__(self, config: AutoformerConfig):
pass
def forward(self, x):
pass
| 3
| 1
| 4
| 0
| 4
| 0
| 1
| 0.5
| 1
| 2
| 1
| 0
| 2
| 1
| 2
| 12
| 14
| 2
| 8
| 6
| 5
| 4
| 8
| 6
| 5
| 1
| 1
| 0
| 2
|
677
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerMeanScaler
|
import torch
from torch import nn
from .configuration_autoformer import AutoformerConfig
class AutoformerMeanScaler(nn.Module):
"""
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
"""
def __init__(self, config: AutoformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-10
self.default_scale = config.default_scale if hasattr(config, 'default_scale') else None
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
num_observed = observed_indicator.sum(self.dim, keepdim=True)
scale = ts_sum / torch.clamp(num_observed, min=1)
if self.default_scale is None:
batch_sum = ts_sum.sum(dim=0)
batch_observations = torch.clamp(num_observed.sum(0), min=1)
default_scale = torch.squeeze(batch_sum / batch_observations)
else:
default_scale = self.default_scale * torch.ones_like(scale)
scale = torch.where(num_observed > 0, scale, default_scale)
scale = torch.clamp(scale, min=self.minimum_scale)
scaled_data = data / scale
if not self.keepdim:
scale = scale.squeeze(dim=self.dim)
return (scaled_data, torch.zeros_like(scale), scale)
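# Illustrative sketch (not from the original file) of the mean-absolute-value scaling
# performed by AutoformerMeanScaler.forward; the toy values and mask are assumptions.
import torch

data = torch.tensor([[[1.0], [-3.0], [0.0], [4.0]]])          # (batch=1, time=4, channels=1)
observed = torch.tensor([[[1.0], [1.0], [0.0], [1.0]]])       # third step is missing
ts_sum = (data * observed).abs().sum(dim=1, keepdim=True)     # |1| + |-3| + |4| = 8
num_observed = observed.sum(dim=1, keepdim=True)              # 3 observed steps
scale = ts_sum / torch.clamp(num_observed, min=1)             # 8 / 3 ≈ 2.67
print(scale.squeeze())                                         # tensor(2.6667)
print((data / scale).squeeze())                                # the rescaled series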
|
class AutoformerMeanScaler(nn.Module):
'''
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
'''
def __init__(self, config: AutoformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3
| 2
| 23
| 3
| 12
| 8
| 4
| 0.76
| 1
| 3
| 1
| 0
| 2
| 4
| 2
| 12
| 52
| 8
| 25
| 16
| 20
| 19
| 22
| 14
| 19
| 5
| 1
| 1
| 8
|
678
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerModel
|
from .configuration_autoformer import AutoformerConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, ModelOutput, SampleTSPredictionOutput, Seq2SeqTSPredictionOutput
import torch
@auto_docstring
class AutoformerModel(AutoformerPreTrainedModel):
def __init__(self, config: AutoformerConfig):
super().__init__(config)
if config.scaling == 'mean' or config.scaling is True:
self.scaler = AutoformerMeanScaler(config)
elif config.scaling == 'std':
self.scaler = AutoformerStdScaler(config)
else:
self.scaler = AutoformerNOPScaler(config)
if config.num_static_categorical_features > 0:
self.embedder = AutoformerFeatureEmbedder(cardinalities=config.cardinality, embedding_dims=config.embedding_dimension)
self.encoder = AutoformerEncoder(config)
self.decoder = AutoformerDecoder(config)
self.decomposition_layer = AutoformerSeriesDecompositionLayer(config)
self.post_init()
@property
def _past_length(self) -> int:
return self.config.context_length + max(self.config.lags_sequence)
def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:
"""
Returns lagged subsequences of a given sequence. Returns a tensor of shape (batch_size, subsequences_length,
feature_size, indices_length), containing lagged subsequences. Specifically, lagged[i, j, :, k] = sequence[i,
-indices[k]-subsequences_length+j, :].
Args:
sequence (`torch.Tensor` of shape `(batch_size, context_length,
feature_size)`): The sequence from which lagged subsequences should be extracted.
subsequences_length (`int`):
Length of the subsequences to be extracted.
shift (`int`, *optional*, defaults to 0):
Shift the lags by this amount back in the time index.
"""
indices = [lag - shift for lag in self.config.lags_sequence]
sequence_length = sequence.shape[1]
if max(indices) + subsequences_length > sequence_length:
raise ValueError(f'lags cannot go further than history length, found lag {max(indices)} while history length is only {sequence_length}')
lagged_values = []
for lag_index in indices:
begin_index = -lag_index - subsequences_length
end_index = -lag_index if lag_index > 0 else None
lagged_values.append(sequence[:, begin_index:end_index, ...])
return torch.stack(lagged_values, dim=-1)
def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Creates the inputs for the network given the past and future values, time features, and static features.
Args:
past_values (`torch.Tensor`):
A tensor of shape `(batch_size, past_length, input_size)` containing the past values.
past_time_features (`torch.Tensor`):
A tensor of shape `(batch_size, past_length, num_features)` containing the past time features.
static_categorical_features (`Optional[torch.Tensor]`):
An optional tensor of shape `(batch_size, num_categorical_features)` containing the static categorical
features.
static_real_features (`Optional[torch.Tensor]`):
An optional tensor of shape `(batch_size, num_real_features)` containing the static real features.
past_observed_mask (`Optional[torch.Tensor]`):
An optional tensor of shape `(batch_size, past_length, input_size)` containing the mask of observed
values in the past.
future_values (`Optional[torch.Tensor]`):
An optional tensor of shape `(batch_size, future_length, input_size)` containing the future values.
Returns:
A tuple containing the following tensors:
- reshaped_lagged_sequence (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_lags *
input_size)` containing the lagged subsequences of the inputs.
- features (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_features)` containing the
concatenated static and time features.
- loc (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the mean of the input
values.
- scale (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the std of the input
values.
- static_feat (`torch.Tensor`): A tensor of shape `(batch_size, num_static_features)` containing the
concatenated static features.
"""
time_feat = torch.cat((past_time_features[:, self._past_length - self.config.context_length:, ...], future_time_features), dim=1) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length:, ...]
if past_observed_mask is None:
past_observed_mask = torch.ones_like(past_values)
context = past_values[:, -self.config.context_length:]
observed_context = past_observed_mask[:, -self.config.context_length:]
_, loc, scale = self.scaler(context, observed_context)
inputs = (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale
log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p()
log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log()
static_feat = torch.cat((log_abs_loc, log_scale), dim=1)
if static_real_features is not None:
static_feat = torch.cat((static_real_features, static_feat), dim=1)
if static_categorical_features is not None:
embedded_cat = self.embedder(static_categorical_features)
static_feat = torch.cat((embedded_cat, static_feat), dim=1)
expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)
features = torch.cat((expanded_static_feat, time_feat), dim=-1)
subsequences_length = self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length
lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)
lags_shape = lagged_sequence.shape
reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:
raise ValueError(f'input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match')
return (reshaped_lagged_sequence, features, loc, scale, static_feat)
def get_encoder(self):
return self.encoder
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[AutoformerModelOutput, tuple]:
"""
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Past values of the time series, that serve as context in order to predict the future. These values may
contain lags, i.e. additional values from the past which are added in order to serve as "extra context".
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features`).
The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.
Missing values need to be replaced with zeros.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):
Optional time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires these additional time features to be provided.
The Autoformer only learns additional embeddings for `static_categorical_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs to learn to output, given the `past_values`.
See the demo notebook and code snippets for details.
Missing values need to be replaced with zeros.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):
Optional time features, which the model internally will add to `future_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires these additional features to be provided.
The Autoformer only learns additional embeddings for `static_categorical_features`.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import AutoformerModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly")
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_inputs, temporal_features, loc, scale, static_feat = self.create_network_inputs(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features)
if encoder_outputs is None:
enc_input = torch.cat((transformer_inputs[:, :self.config.context_length, ...], temporal_features[:, :self.config.context_length, ...]), dim=-1)
encoder_outputs = self.encoder(inputs_embeds=enc_input, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
if future_values is not None:
seasonal_input, trend_input = self.decomposition_layer(transformer_inputs[:, :self.config.context_length, ...])
mean = torch.mean(transformer_inputs[:, :self.config.context_length, ...], dim=1).unsqueeze(1).repeat(1, self.config.prediction_length, 1)
zeros = torch.zeros([transformer_inputs.shape[0], self.config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device)
decoder_input = torch.cat((torch.cat((seasonal_input[:, -self.config.label_length:, ...], zeros), dim=1), temporal_features[:, self.config.context_length - self.config.label_length:, ...]), dim=-1)
trend_init = torch.cat((torch.cat((trend_input[:, -self.config.label_length:, ...], mean), dim=1), temporal_features[:, self.config.context_length - self.config.label_length:, ...]), dim=-1)
decoder_outputs = self.decoder(trend=trend_init, inputs_embeds=decoder_input, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
else:
decoder_outputs = AutoFormerDecoderOutput()
if not return_dict:
return decoder_outputs + encoder_outputs + (loc, scale, static_feat)
return AutoformerModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, trend=decoder_outputs.trend, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, loc=loc, scale=scale, static_features=static_feat)
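# Illustrative sketch (not from the original file) of the lag-feature construction in
# AutoformerModel.get_lagged_subsequences, written standalone so the negative indexing
# is easy to follow. `lags_sequence=[1, 2]` and the toy series are assumptions.
import torch

sequence = torch.arange(1.0, 7.0).reshape(1, 6, 1)     # values 1..6, shape (batch, time, feature)
lags_sequence = [1, 2]
subsequences_length = 3

lagged_values = []
for lag in lags_sequence:
    begin = -lag - subsequences_length
    end = -lag if lag > 0 else None
    lagged_values.append(sequence[:, begin:end, ...])
lagged = torch.stack(lagged_values, dim=-1)             # (batch, subsequences_length, feature, num_lags)
print(lagged[0, :, 0, :])
# tensor([[3., 2.],
#         [4., 3.],
#         [5., 4.]])  -> lag-1 and lag-2 values aligned with the last 3 time steps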
|
@auto_docstring
class AutoformerModel(AutoformerPreTrainedModel):
def __init__(self, config: AutoformerConfig):
pass
@property
def _past_length(self) -> int:
pass
def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:
'''
Returns lagged subsequences of a given sequence. Returns a tensor of shape (batch_size, subsequences_length,
feature_size, indices_length), containing lagged subsequences. Specifically, lagged[i, j, :, k] = sequence[i,
-indices[k]-subsequences_length+j, :].
Args:
sequence (`torch.Tensor` of shape `(batch_size, context_length,
feature_size)`): The sequence from which lagged subsequences should be extracted.
subsequences_length (`int`):
Length of the subsequences to be extracted.
shift (`int`, *optional*, defaults to 0):
Shift the lags by this amount back in the time index.
'''
pass
def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Creates the inputs for the network given the past and future values, time features, and static features.
Args:
past_values (`torch.Tensor`):
A tensor of shape `(batch_size, past_length, input_size)` containing the past values.
past_time_features (`torch.Tensor`):
A tensor of shape `(batch_size, past_length, num_features)` containing the past time features.
static_categorical_features (`Optional[torch.Tensor]`):
An optional tensor of shape `(batch_size, num_categorical_features)` containing the static categorical
features.
static_real_features (`Optional[torch.Tensor]`):
An optional tensor of shape `(batch_size, num_real_features)` containing the static real features.
past_observed_mask (`Optional[torch.Tensor]`):
An optional tensor of shape `(batch_size, past_length, input_size)` containing the mask of observed
values in the past.
future_values (`Optional[torch.Tensor]`):
An optional tensor of shape `(batch_size, future_length, input_size)` containing the future values.
Returns:
A tuple containing the following tensors:
- reshaped_lagged_sequence (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_lags *
input_size)` containing the lagged subsequences of the inputs.
- features (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_features)` containing the
concatenated static and time features.
- loc (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the mean of the input
values.
- scale (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the std of the input
values.
- static_feat (`torch.Tensor`): A tensor of shape `(batch_size, num_static_features)` containing the
concatenated static features.
'''
pass
def get_encoder(self):
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[AutoformerModelOutput, tuple]:
'''
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Past values of the time series, that serve as context in order to predict the future. These values may
contain lags, i.e. additional values from the past which are added in order to serve as "extra context".
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features`).
The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.
Missing values need to be replaced with zeros.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):
Optional time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires these additional time features to be provided.
The Autoformer only learns additional embeddings for `static_categorical_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs to learn to output, given the `past_values`.
See the demo notebook and code snippets for details.
Missing values need to be replaced with zeros.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):
Optional time features, which the model internally will add to `future_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires these additional features to be provided.
The Autoformer only learns additional embeddings for `static_categorical_features`.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import AutoformerModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly")
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```'''
pass
| 10
| 3
| 46
| 5
| 29
| 12
| 5
| 0.39
| 1
| 16
| 11
| 0
| 7
| 5
| 7
| 8
| 330
| 39
| 210
| 74
| 169
| 81
| 76
| 42
| 68
| 11
| 2
| 1
| 32
|
679
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerModelOutput
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutput, ModelOutput, SampleTSPredictionOutput, Seq2SeqTSPredictionOutput
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro='\n Autoformer model output that contains the additional trend output.\n ')
class AutoformerModelOutput(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Trend tensor for each time series.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
Shift values of each time series' context window which is used to give the model inputs of the same
magnitude and then used to shift back to the original magnitude.
scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
Scaling values of each time series' context window which is used to give the model inputs of the same
magnitude and then used to rescale back to the original magnitude.
static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*):
Static features of each time series in a batch which are copied to the covariates at inference time.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
trend: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
loc: Optional[torch.FloatTensor] = None
scale: Optional[torch.FloatTensor] = None
static_features: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring(custom_intro='\n Autoformer model output that contains the additional trend output.\n ')
class AutoformerModelOutput(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Trend tensor for each time series.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
Shift values of each time series' context window which is used to give the model inputs of the same
magnitude and then used to shift back to the original magnitude.
scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
Scaling values of each time series' context window which is used to give the model inputs of the same
magnitude and then used to rescale back to the original magnitude.
static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*):
Static features of each time series in a batch which are copied to the covariates at inference time.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.77
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 9
| 13
| 13
| 12
| 49
| 13
| 13
| 12
| 0
| 1
| 0
| 0
|
680
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerNOPScaler
|
import torch
from typing import Optional, Union
from .configuration_autoformer import AutoformerConfig
from torch import nn
class AutoformerNOPScaler(nn.Module):
"""
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
"""
def __init__(self, config: AutoformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
return (data, loc, scale)
|
class AutoformerNOPScaler(nn.Module):
'''
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
'''
def __init__(self, config: AutoformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3
| 2
| 10
| 0
| 5
| 5
| 2
| 1.09
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 25
| 2
| 11
| 9
| 6
| 12
| 9
| 7
| 6
| 3
| 1
| 0
| 4
|
681
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerPreTrainedModel
|
from .configuration_autoformer import AutoformerConfig
from typing import Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from torch import nn
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...modeling_utils import PreTrainedModel
@auto_docstring
class AutoformerPreTrainedModel(PreTrainedModel):
config: AutoformerConfig
base_model_prefix = 'model'
main_input_name = 'past_values'
supports_gradient_checkpointing = True
def _init_weights(self, module: nn.Module):
std = self.config.init_std
if isinstance(module, (nn.Linear, nn.Conv1d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, AutoformerSinusoidalPositionalEmbedding):
module._init_weight()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
|
@auto_docstring
class AutoformerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
| 4
| 0
| 12
| 0
| 12
| 0
| 6
| 0
| 1
| 1
| 1
| 4
| 1
| 0
| 1
| 1
| 18
| 1
| 17
| 7
| 15
| 0
| 15
| 7
| 13
| 6
| 1
| 2
| 6
|
682
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerSeriesDecompositionLayer
|
from .configuration_autoformer import AutoformerConfig
from torch import nn
import torch
class AutoformerSeriesDecompositionLayer(nn.Module):
"""
Returns the trend and the seasonal parts of the time series. Calculated as:
x_trend = AvgPool(Padding(X)) and x_seasonal = X - x_trend
"""
def __init__(self, config: AutoformerConfig):
super().__init__()
self.kernel_size = config.moving_average
self.avg = nn.AvgPool1d(kernel_size=self.kernel_size, stride=1, padding=0)
def forward(self, x):
"""Input shape: Batch x Time x EMBED_DIM"""
num_of_pads = (self.kernel_size - 1) // 2
front = x[:, 0:1, :].repeat(1, num_of_pads, 1)
end = x[:, -1:, :].repeat(1, num_of_pads, 1)
x_padded = torch.cat([front, x, end], dim=1)
x_trend = self.avg(x_padded.permute(0, 2, 1)).permute(0, 2, 1)
x_seasonal = x - x_trend
return (x_seasonal, x_trend)
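# Illustrative sketch (not from the original file) of the padded moving-average split
# performed by AutoformerSeriesDecompositionLayer.forward; kernel_size=3 and the toy
# series are assumptions.
import torch
from torch import nn

kernel_size = 3
avg = nn.AvgPool1d(kernel_size=kernel_size, stride=1, padding=0)
x = torch.tensor([[[1.0], [2.0], [4.0], [8.0]]])               # (batch, time, dim)

num_of_pads = (kernel_size - 1) // 2
front = x[:, 0:1, :].repeat(1, num_of_pads, 1)                 # pad by repeating the first step
end = x[:, -1:, :].repeat(1, num_of_pads, 1)                   # pad by repeating the last step
x_padded = torch.cat([front, x, end], dim=1)
x_trend = avg(x_padded.permute(0, 2, 1)).permute(0, 2, 1)      # moving-average trend
x_seasonal = x - x_trend                                        # residual = seasonal part
print(x_trend.squeeze())      # tensor([1.3333, 2.3333, 4.6667, 6.6667])
print(x_seasonal.squeeze())   # tensor([-0.3333, -0.3333, -0.6667,  1.3333])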
|
class AutoformerSeriesDecompositionLayer(nn.Module):
'''
Returns the trend and the seasonal parts of the time series. Calculated as:
x_trend = AvgPool(Padding(X)) and x_seasonal = X - x_trend
'''
def __init__(self, config: AutoformerConfig):
pass
def forward(self, x):
'''Input shape: Batch x Time x EMBED_DIM'''
pass
| 3
| 2
| 8
| 1
| 6
| 2
| 1
| 0.54
| 1
| 2
| 1
| 0
| 2
| 2
| 2
| 12
| 24
| 4
| 13
| 11
| 10
| 7
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
683
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerSinusoidalPositionalEmbedding
|
from typing import Optional, Union
import torch
from torch import nn
import numpy as np
class AutoformerSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None:
super().__init__(num_positions, embedding_dim)
def _init_weight(self):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = self.weight.shape
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False)
sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
self.weight = nn.Parameter(out, requires_grad=False)
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(position_ids)
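# Illustrative sketch (not from the original file) of the table built by
# AutoformerSinusoidalPositionalEmbedding._init_weight: sin features fill the first half
# of each vector, cos features the second half. Toy sizes (n_pos=4, dim=6) are assumptions.
import numpy as np
import torch

n_pos, dim = 4, 6
position_enc = np.array(
    [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out = torch.empty(n_pos, dim)
sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))   # sin half
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))    # cos half
print(out[1])   # embedding for position 1: [sin terms ..., cos terms ...]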
|
class AutoformerSinusoidalPositionalEmbedding(nn.Embedding):
'''This module produces sinusoidal positional embeddings of any length.'''
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None:
pass
def _init_weight(self):
'''
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
'''
pass
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
'''`input_ids_shape` is expected to be [bsz x seqlen].'''
pass
| 5
| 3
| 8
| 0
| 7
| 2
| 1
| 0.3
| 1
| 4
| 0
| 0
| 2
| 1
| 3
| 3
| 32
| 3
| 23
| 12
| 17
| 7
| 17
| 10
| 13
| 2
| 1
| 0
| 4
|
684
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerStdScaler
|
from torch import nn
from .configuration_autoformer import AutoformerConfig
import torch
class AutoformerStdScaler(nn.Module):
"""
Standardizes features by calculating the mean and scale along the first dimension, and then normalizes the data
by subtracting the mean and dividing by the standard deviation.
"""
def __init__(self, config: AutoformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-05
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
denominator = denominator.clamp_min(1.0)
loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
scale = torch.sqrt(variance + self.minimum_scale)
return ((data - loc) / scale, loc, scale)
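# Illustrative sketch (not from the original file) of the masked standardization in
# AutoformerStdScaler.forward: mean and variance are computed only over observed steps.
# The toy values, mask, and minimum_scale=1e-5 are assumptions.
import torch

data = torch.tensor([[[2.0], [4.0], [100.0], [6.0]]])              # (batch, time, channels)
observed = torch.tensor([[[1.0], [1.0], [0.0], [1.0]]])            # the 100.0 is a masked fill value
denominator = observed.sum(dim=1, keepdim=True).clamp_min(1.0)     # 3 observed steps
loc = (data * observed).sum(dim=1, keepdim=True) / denominator     # (2 + 4 + 6) / 3 = 4
variance = (((data - loc) * observed) ** 2).sum(dim=1, keepdim=True) / denominator
scale = torch.sqrt(variance + 1e-5)                                 # sqrt(8/3) ≈ 1.633
print(loc.squeeze(), scale.squeeze())
print(((data - loc) / scale).squeeze())                             # standardized series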
|
class AutoformerStdScaler(nn.Module):
'''
Standardizes features by calculating the mean and scale along the first dimension, and then normalizes the data
by subtracting the mean and dividing by the standard deviation.
'''
def __init__(self, config: AutoformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3
| 2
| 13
| 1
| 7
| 6
| 3
| 1
| 1
| 3
| 1
| 0
| 2
| 3
| 2
| 12
| 33
| 3
| 15
| 12
| 10
| 15
| 13
| 10
| 10
| 4
| 1
| 0
| 5
|
685
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/autoformer/modeling_autoformer.py
|
transformers.models.autoformer.modeling_autoformer.AutoformerValueEmbedding
|
from torch import nn
class AutoformerValueEmbedding(nn.Module):
def __init__(self, feature_size, d_model):
super().__init__()
self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)
def forward(self, x):
return self.value_projection(x)
|
class AutoformerValueEmbedding(nn.Module):
def __init__(self, feature_size, d_model):
pass
def forward(self, x):
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 7
| 1
| 6
| 4
| 3
| 0
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
686
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/configuration_bamba.py
|
transformers.models.bamba.configuration_bamba.BambaConfig
|
from ...configuration_utils import PretrainedConfig
class BambaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`BambaModel`]. It is used to instantiate a
BambaModel model according to the specified arguments, defining the model architecture. Instantiating a configuration
with defaults taken from [ibm-fms/Bamba-9.8b-2.2T-hf](https://huggingface.co/ibm-fms/Bamba-9.8b-2.2T-hf).
The BambaModel is a hybrid [mamba2](https://github.com/state-spaces/mamba) architecture with SwiGLU.
The checkpoints are jointly trained by IBM, Princeton, and UIUC.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 128000):
Vocabulary size of the Bamba model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BambaModel`]
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has an output word embedding layer.
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint
significantly.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
max_position_embeddings (`int`, *optional*, defaults to 262144):
Max cached sequence length for the model
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
attn_layer_indices (`list`, *optional*):
Specifies the layer indices that will have full attention. Must contain values at most num_hidden_layers.
mamba_n_heads (`int`, *optional*, defaults to 128):
The number of mamba heads used in the v2 implementation.
mamba_d_head (`int`, *optional*, defaults to `"auto"`):
Head embedding dimension size
mamba_n_groups (`int`, *optional*, defaults to 1):
The number of the mamba groups used in the v2 implementation.
mamba_d_state (`int`, *optional*, defaults to 256):
The dimension of the mamba state space latents
mamba_d_conv (`int`, *optional*, defaults to 4):
The size of the mamba convolution kernel
mamba_expand (`int`, *optional*, defaults to 2):
Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
mamba_chunk_size (`int`, *optional*, defaults to 256):
The chunks in which to break the sequence when doing prefill/training
mamba_conv_bias (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
mamba_proj_bias (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
z_loss_coefficient (`float`, *optional*, defaults to 0.0):
Coefficient for auxiliary z-loss used to control logit growth during training
"""
model_type = 'bamba'
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=128000, tie_word_embeddings=False, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act='silu', initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, num_logits_to_keep=1, pad_token_id=0, bos_token_id=1, eos_token_id=2, max_position_embeddings=262144, attention_dropout=0.0, attn_layer_indices=None, mamba_n_heads=128, mamba_d_head='auto', mamba_n_groups=1, mamba_d_state=256, mamba_d_conv=4, mamba_expand=2, mamba_chunk_size=256, mamba_conv_bias=True, mamba_proj_bias=False, z_loss_coefficient=0.0, **kwargs):
self.vocab_size = vocab_size
self.tie_word_embeddings = tie_word_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.attention_dropout = attention_dropout
self.attention_bias = False
self.mlp_bias = False
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.num_logits_to_keep = num_logits_to_keep
self.attn_layer_indices = attn_layer_indices
self.rope_theta = 10000.0
self.rope_scaling = None
self.partial_rotary_factor = 0.5
mamba_intermediate = mamba_expand * hidden_size
if mamba_intermediate % mamba_n_heads != 0:
raise ValueError('mamba_n_heads must divide mamba_expand * hidden_size')
if mamba_d_head == 'auto':
mamba_d_head = mamba_intermediate // mamba_n_heads
if mamba_d_head * mamba_n_heads != mamba_intermediate:
raise ValueError('The dimensions for the Mamba head state do not match the model intermediate_size')
self.mamba_n_heads = mamba_n_heads
self.mamba_d_head = mamba_d_head
self.mamba_n_groups = mamba_n_groups
self.mamba_d_state = mamba_d_state
self.mamba_d_conv = mamba_d_conv
self.mamba_expand = mamba_expand
self.mamba_chunk_size = mamba_chunk_size
self.mamba_conv_bias = mamba_conv_bias
self.mamba_proj_bias = mamba_proj_bias
self.z_loss_coefficient = z_loss_coefficient
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
@property
def layers_block_type(self):
return ['attention' if self.attn_layer_indices and i in self.attn_layer_indices else 'mamba' for i in range(self.num_hidden_layers)]
|
class BambaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`BambaModel`]. It is used to instantiate a
BambaModel model according to the specified arguments, defining the model architecture. Instantiating a configuration
with defaults taken from [ibm-fms/Bamba-9.8b-2.2T-hf](https://huggingface.co/ibm-fms/Bamba-9.8b-2.2T-hf).
The BambaModel is a hybrid [mamba2](https://github.com/state-spaces/mamba) architecture with SwiGLU.
The checkpoints are jointly trained by IBM, Princeton, and UIUC.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 128000):
Vocabulary size of the Bamba model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BambaModel`]
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has an output word embedding layer.
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
integer value, only the last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
sequence may use a lot of memory, so setting `num_logits_to_keep=1` will reduce the memory footprint
significantly.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
max_position_embeddings (`int`, *optional*, defaults to 262144):
Max cached sequence length for the model
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
attn_layer_indices (`list`, *optional*):
Specifies the layer indices that will have full attention. Each value must be less than `num_hidden_layers`.
mamba_n_heads (`int`, *optional*, defaults to 128):
The number of mamba heads used in the v2 implementation.
mamba_d_head (`int`, *optional*, defaults to `"auto"`):
Head embedding dimension size
mamba_n_groups (`int`, *optional*, defaults to 1):
The number of the mamba groups used in the v2 implementation.
mamba_d_state (`int`, *optional*, defaults to 256):
The dimension of the mamba state space latents
mamba_d_conv (`int`, *optional*, defaults to 4):
The size of the mamba convolution kernel
mamba_expand (`int`, *optional*, defaults to 2):
Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
mamba_chunk_size (`int`, *optional*, defaults to 256):
The chunks in which to break the sequence when doing prefill/training
mamba_conv_bias (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
mamba_proj_bias (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
z_loss_coefficient (`float`, *optional*, defaults to 0.0):
Coefficient for auxiliary z-loss used to control logit growth during training
'''
def __init__(self, vocab_size=128000, tie_word_embeddings=False, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act='silu', initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, num_logits_to_keep=1, pad_token_id=0, bos_token_id=1, eos_token_id=2, max_position_embeddings=262144, attention_dropout=0.0, attn_layer_indices=None, mamba_n_heads=128, mamba_d_head='auto', mamba_n_groups=1, mamba_d_state=256, mamba_d_conv=4, mamba_expand=2, mamba_chunk_size=256, mamba_conv_bias=True, mamba_proj_bias=False, z_loss_coefficient=0.0, **kwargs):
pass
@property
def layers_block_type(self):
pass
| 4
| 1
| 47
| 5
| 41
| 1
| 4
| 0.92
| 1
| 3
| 0
| 0
| 2
| 29
| 2
| 2
| 180
| 17
| 85
| 66
| 51
| 78
| 45
| 35
| 42
| 5
| 1
| 1
| 7
|
687
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaAttention
|
from ...cache_utils import Cache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_bamba import BambaConfig
from typing import Any, Callable, Optional, TypedDict, Union
from ...processing_utils import Unpack
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
class BambaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: BambaConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
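# --- Illustrative sketch (not part of the library) -------------------------------------
# Shape walk-through of the grouped-query projection used in BambaAttention.forward above:
# queries keep num_attention_heads heads while keys/values keep only num_key_value_heads,
# and the eager path repeats K/V so each query group shares one key/value head. The causal
# mask and dropout are omitted here; all sizes are toy values.
import torch

batch, seq_len, hidden_size = 2, 5, 64
num_attention_heads, num_key_value_heads, head_dim = 8, 2, 8

q = torch.randn(batch, seq_len, num_attention_heads * head_dim).view(batch, seq_len, -1, head_dim).transpose(1, 2)
k = torch.randn(batch, seq_len, num_key_value_heads * head_dim).view(batch, seq_len, -1, head_dim).transpose(1, 2)
v = torch.randn_like(k)

groups = num_attention_heads // num_key_value_heads    # 4 query heads per key/value head
k_rep = k.repeat_interleave(groups, dim=1)
v_rep = v.repeat_interleave(groups, dim=1)

scores = (q @ k_rep.transpose(-2, -1)) * head_dim ** -0.5
attn_out = torch.softmax(scores, dim=-1) @ v_rep        # (batch, heads, seq, head_dim)
out = attn_out.transpose(1, 2).reshape(batch, seq_len, -1)
assert out.shape == (batch, seq_len, hidden_size)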
|
class BambaAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: BambaConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4
| 1
| 35
| 4
| 31
| 1
| 3
| 0.03
| 1
| 6
| 3
| 0
| 2
| 11
| 2
| 12
| 74
| 9
| 63
| 31
| 52
| 2
| 34
| 23
| 31
| 5
| 1
| 2
| 6
|
688
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaDecoderLayer
|
from .configuration_bamba import BambaConfig
import torch
from ...processing_utils import Unpack
from typing import Any, Callable, Optional, TypedDict, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg
class BambaDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: BambaConfig, layer_idx: int, layer_type: str='mamba'):
super().__init__()
num_experts = 1
ffn_layer_class = BambaMLP if num_experts == 1 else None
self.feed_forward = ffn_layer_class(config)
self.input_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.pre_ff_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layer_type = layer_type
if layer_type == 'mamba':
self.mamba = BambaMixer(config=config, layer_idx=layer_idx)
elif layer_type == 'attention':
self.self_attn = BambaAttention(config, layer_idx)
else:
raise ValueError('Invalid layer_type')
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[HybridMambaAttentionDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[BambaFlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs. Can be used to provide `BambaFlashAttentionKwargs` for
padding-free training and/or improve torch.compile performance.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
if self.layer_type == 'mamba':
hidden_states = self.mamba(hidden_states=hidden_states, cache_params=past_key_values, cache_position=cache_position, attention_mask=attention_mask, **kwargs)
self_attn_weights = None
elif self.layer_type == 'attention':
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
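# --- Illustrative sketch (not part of the library) -------------------------------------
# Condensed view of the pre-norm residual structure implemented by forward() above: the
# token mixer (Mamba or attention, chosen per layer) and the feed-forward block each sit
# behind their own RMSNorm and their own residual connection.
def decoder_layer_sketch(hidden_states, mixer, feed_forward, input_norm, pre_ff_norm):
    hidden_states = hidden_states + mixer(input_norm(hidden_states))
    hidden_states = hidden_states + feed_forward(pre_ff_norm(hidden_states))
    return hidden_states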
|
class BambaDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: BambaConfig, layer_idx: int, layer_type: str='mamba'):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[HybridMambaAttentionDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[BambaFlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs. Can be used to provide `BambaFlashAttentionKwargs` for
padding-free training and/or improve torch.compile performance.
'''
pass
| 4
| 1
| 46
| 5
| 29
| 13
| 4
| 0.43
| 1
| 12
| 6
| 0
| 2
| 6
| 2
| 12
| 93
| 11
| 58
| 25
| 44
| 25
| 29
| 14
| 26
| 4
| 1
| 1
| 8
|
689
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaForCausalLM
|
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from typing import Any, Callable, Optional, TypedDict, Union
from torch import nn
import torch
from ...generation import GenerationMixin
@auto_docstring
class BambaForCausalLM(BambaPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config):
super().__init__(config)
self.model = BambaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.z_loss_coefficient = config.z_loss_coefficient
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[HybridMambaAttentionDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> CausalLMOutputWithPast:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, BambaForCausalLM
>>> model = BambaForCausalLM.from_pretrained("...")
>>> tokenizer = AutoTokenizer.from_pretrained("...")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if self.z_loss_coefficient > 0:
z_loss = logits.logsumexp(dim=-1).to(dtype=loss.dtype).pow(2).mean()
loss = loss + self.z_loss_coefficient * z_loss
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs):
empty_past_kv = past_key_values is None
if not empty_past_kv:
if inputs_embeds is not None or cache_position[-1] >= input_ids.shape[1]:
input_ids = input_ids[:, -cache_position.shape[0]:]
elif input_ids.shape[1] != cache_position.shape[0]:
input_ids = input_ids[:, cache_position]
else:
past_key_values = HybridMambaAttentionDynamicCache(self.config, input_ids.shape[0], self.dtype, device=self.device)
if attention_mask is not None and position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if not empty_past_kv:
position_ids = position_ids[:, -input_ids.shape[1]:]
if inputs_embeds is not None and empty_past_kv:
model_inputs = {'inputs_embeds': inputs_embeds}
else:
model_inputs = {'input_ids': input_ids.contiguous()}
model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask, 'logits_to_keep': self.config.num_logits_to_keep, 'cache_position': cache_position})
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
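# --- Illustrative sketch (not part of the library) -------------------------------------
# The optional auxiliary z-loss added in forward() above penalizes the squared
# log-normalizer of the logits (logsumexp over the vocabulary) to discourage unbounded
# logit growth during training. The 0.01 coefficient below is a made-up stand-in for
# `config.z_loss_coefficient`, and the real loss function also shifts labels for
# next-token prediction; toy tensors only.
import torch
import torch.nn.functional as F

logits = torch.randn(2, 7, 11)                         # (batch, seq, vocab)
labels = torch.randint(0, 11, (2, 7))
ce = F.cross_entropy(logits.reshape(-1, 11), labels.reshape(-1))
z_loss = logits.logsumexp(dim=-1).pow(2).mean()        # squared log-partition per token
loss = ce + 0.01 * z_loss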
|
@auto_docstring
class BambaForCausalLM(BambaPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[HybridMambaAttentionDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> CausalLMOutputWithPast:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, BambaForCausalLM
>>> model = BambaForCausalLM.from_pretrained("...")
>>> tokenizer = AutoTokenizer.from_pretrained("...")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs):
pass
| 7
| 1
| 18
| 2
| 12
| 4
| 2
| 0.34
| 2
| 8
| 3
| 0
| 9
| 5
| 9
| 10
| 181
| 27
| 118
| 50
| 80
| 40
| 53
| 23
| 43
| 8
| 2
| 2
| 22
|
690
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaMLP
|
from torch import nn
from transformers.activations import ACT2FN
class BambaMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
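# --- Illustrative sketch (not part of the library) -------------------------------------
# The MLP above is a gated (SwiGLU-style) block: down_proj(silu(gate_proj(x)) * up_proj(x)).
# Tiny functional equivalent with made-up sizes and random weights.
import torch
import torch.nn.functional as F

hidden, intermediate = 8, 32
x = torch.randn(3, hidden)
gate_w = torch.randn(intermediate, hidden)
up_w = torch.randn(intermediate, hidden)
down_w = torch.randn(hidden, intermediate)

y = F.linear(F.silu(F.linear(x, gate_w)) * F.linear(x, up_w), down_w)
assert y.shape == x.shape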
|
class BambaMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 7
| 2
| 12
| 14
| 1
| 13
| 11
| 10
| 0
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
691
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaMixer
|
from transformers.activations import ACT2FN
from torch import nn
import torch
from .configuration_bamba import BambaConfig
from typing import Any, Callable, Optional, TypedDict, Union
class BambaMixer(nn.Module):
"""
Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
There are a few differences between this and Mamba2Mixer:
- The variable use_precomputed_states is slightly different due to the hybrid cache structure
- A few non-obvious bugs with batching in the slow path that exist in main are fixed here
- Some extra variables that our layer doesn't need have been removed
- We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged
"""
def __init__(self, config: BambaConfig, layer_idx: int):
super().__init__()
self.num_heads = config.mamba_n_heads
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = int(config.mamba_expand * self.hidden_size)
self.layer_idx = layer_idx
self.use_conv_bias = config.mamba_conv_bias
self.activation = config.hidden_act
self.act = ACT2FN[config.hidden_act]
self.use_bias = config.mamba_proj_bias
self.layer_norm_epsilon = config.rms_norm_eps
self.n_groups = config.mamba_n_groups
self.head_dim = config.mamba_d_head
self.chunk_size = config.mamba_chunk_size
self.time_step_limit = (0.0, float('inf'))
self.time_step_min = 0.001
self.time_step_max = 0.1
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
self.conv1d = nn.Conv1d(in_channels=self.conv_dim, out_channels=self.conv_dim, bias=config.mamba_conv_bias, kernel_size=self.conv_kernel_size, groups=self.conv_dim, padding=self.conv_kernel_size - 1)
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
self.in_proj = nn.Linear(self.hidden_size, projection_size, bias=self.use_bias)
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
A = torch.arange(1, self.num_heads + 1)
self.A_log = nn.Parameter(torch.log(A))
self.norm = BambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon)
self.D = nn.Parameter(torch.ones(self.num_heads))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)
if not is_fast_path_available:
logger.warning_once('The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)` is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1d')
else:
logger.warning_once('The fast path for Bamba will be used when running the model on a GPU')
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, seq_idx: Optional[torch.IntTensor]=None):
hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
projected_states = self.in_proj(hidden_states)
batch_size, seq_len, _ = hidden_states.shape
groups_time_state_size = self.n_groups * self.ssm_state_size
use_precomputed_states = cache_params is not None and cache_params.has_previous_state and (seq_len == 1) and (cache_params.conv_states[self.layer_idx].shape[0] == cache_params.ssm_states[self.layer_idx].shape[0] == batch_size) and (cache_position is not None) and (cache_position[0] > 0)
if use_precomputed_states:
gate, hidden_states_B_C, dt = projected_states.squeeze(1).split([self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
hidden_states_B_C = causal_conv1d_update(hidden_states_B_C, cache_params.conv_states[self.layer_idx], self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation)
hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1)
A = -torch.exp(self.A_log.float())
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
D = self.D[:, None, ...].expand(-1, self.head_dim)
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
hidden_states = selective_state_update(cache_params.ssm_states[self.layer_idx], hidden_states_reshaped, dt, A, B, C, D, z=None, dt_bias=dt_bias, dt_softplus=True)
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
hidden_states = self.norm(hidden_states, gate)
out = self.out_proj(hidden_states)[:, None, ...]
else:
A = -torch.exp(self.A_log.float())
dt_limit_kwargs = {} if self.time_step_limit == (0.0, float('inf')) else {'dt_limit': self.time_step_limit}
if self.training and cache_params is None:
out = mamba_split_conv1d_scan_combined(projected_states, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.dt_bias, A, D=self.D, chunk_size=self.chunk_size, seq_idx=seq_idx, activation=self.activation, rmsnorm_weight=self.norm.weight, rmsnorm_eps=self.norm.variance_epsilon, outproj_weight=self.out_proj.weight, outproj_bias=self.out_proj.bias, headdim=self.head_dim, ngroups=self.n_groups, norm_before_gate=False, return_final_states=False, **dt_limit_kwargs)
else:
gate, hidden_states_B_C, dt = projected_states.split([self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
if cache_params is not None:
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
conv_states = nn.functional.pad(hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_states)
if self.activation not in ['silu', 'swish']:
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
else:
hidden_states_B_C = causal_conv1d_fn(x=hidden_states_B_C.transpose(1, 2), weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation, seq_idx=seq_idx).transpose(1, 2)
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1)
scan_output, ssm_state = mamba_chunk_scan_combined(hidden_states.view(batch_size, seq_len, -1, self.head_dim), dt, A, B.view(batch_size, seq_len, self.n_groups, -1), C.view(batch_size, seq_len, self.n_groups, -1), chunk_size=self.chunk_size, D=self.D, z=None, seq_idx=seq_idx, return_final_states=True, dt_bias=self.dt_bias, dt_softplus=True, **dt_limit_kwargs)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = scan_output.view(batch_size, seq_len, -1)
scan_output = self.norm(scan_output, gate)
out = self.out_proj(scan_output)
return out
def torch_forward(self, input_states, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
input_states = apply_mask_to_padding_states(input_states, attention_mask)
projected_states = self.in_proj(input_states)
gate, hidden_states_B_C, dt = projected_states.split([self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
use_precomputed_states = cache_params is not None and cache_params.has_previous_state and (seq_len == 1) and (cache_params.conv_states[self.layer_idx].shape[0] == cache_params.ssm_states[self.layer_idx].shape[0] == batch_size) and (cache_position is not None) and (cache_position[0] > 0)
if use_precomputed_states:
cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1)
cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device)
conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
hidden_states_B_C = torch.sum(conv_states * self.conv1d.weight.squeeze(1), dim=-1)
if self.use_conv_bias:
hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
hidden_states_B_C = self.act(hidden_states_B_C)
else:
if cache_params is not None:
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
conv_states = nn.functional.pad(hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_states)
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1)
A = -torch.exp(self.A_log.float())
if use_precomputed_states:
cache_device = cache_params.ssm_states[self.layer_idx].device
dt = dt[:, 0, :][:, None, ...]
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dA = torch.exp(dt[..., None] * A).to(device=cache_device)
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
B = B.reshape(batch_size, -1, B.shape[-1])
dB = dt[..., None] * B[..., None, :]
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
dBx = (dB * hidden_states[..., None]).to(device=cache_device)
cache_params.ssm_states[self.layer_idx].copy_(cache_params.ssm_states[self.layer_idx] * dA + dBx)
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
C = C.reshape(batch_size, -1, C.shape[-1])
ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype)
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)
y = torch.bmm(ssm_states_reshaped, C_reshaped)
y = y.view(batch_size, self.num_heads, self.head_dim)
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
y = (y + hidden_states * D).to(y.dtype)
y = y.reshape(batch_size, -1)[:, None, ...]
else:
dt = nn.functional.softplus(dt + self.dt_bias)
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
hidden_states = hidden_states * dt[..., None]
A = A.to(hidden_states.dtype) * dt
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
A = A.permute(0, 3, 1, 2)
A_cumsum = torch.cumsum(A, dim=-1)
L = torch.exp(segment_sum(A))
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]
G = G_intermediate.sum(dim=-1)
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
M = M_intermediate.sum(dim=-1)
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)
decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)
if use_precomputed_states:
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
else:
previous_states = torch.zeros_like(states[:, :1])
states = torch.cat([previous_states, states], dim=1)
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
decay_chunk = decay_chunk.transpose(1, 3)
new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
states, ssm_state = (new_states[:, :-1], new_states[:, -1])
state_decay_out = torch.exp(A_cumsum)
C_times_states = C[..., None, :] * states[:, :, None, ...]
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
Y_off = C_times_states.sum(-1) * state_decay_out_permuted[..., None]
y = Y_diag + Y_off
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
y = y + D_residual
if pad_size > 0:
y = y[:, :seq_len, :, :]
y = y.reshape(batch_size, seq_len, -1)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = self.norm(y, gate)
contextualized_states = self.out_proj(scan_output.to(dtype))
return contextualized_states
def forward(self, hidden_states, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, seq_idx: Optional[torch.IntTensor]=None, **kwargs):
if is_fast_path_available and 'cuda' in self.in_proj.weight.device.type:
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask, seq_idx)
if seq_idx is not None:
raise NotImplementedError('`seq_idx` support requires fast path support. Please install `mamba_ssm` and `causal_conv1d`')
dtype = hidden_states.dtype
if attention_mask is not None and attention_mask.shape[1] > 1 and (attention_mask.shape[0] > 1):
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
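# --- Illustrative sketch (not part of the library) -------------------------------------
# Heavily simplified single-token SSM update, mirroring the recurrence the cached decode
# path of torch_forward above performs per head; it ignores the causal convolution,
# gating, head grouping and the chunked prefill path. The discretization is:
#   dt  = softplus(dt + dt_bias)           (input-dependent time step)
#   dA  = exp(dt * A), dBx = dt * B * x    (discretized transition and input)
#   h   = h * dA + dBx                     (state update)
#   y   = h @ C + D * x                    (readout with skip connection)
import torch
import torch.nn.functional as F

head_dim, state_size = 4, 6
h = torch.zeros(head_dim, state_size)        # per-head SSM state (kept in the cache)
A = -torch.ones(1)                           # A = -exp(A_log), a negative scalar per head
D = torch.ones(1)
x = torch.randn(head_dim)                    # input channels for this head at this token
B = torch.randn(state_size)
C = torch.randn(state_size)
dt = F.softplus(torch.tensor(0.5))

dA = torch.exp(dt * A)                       # scalar decay of the previous state
dBx = (dt * B)[None, :] * x[:, None]         # (head_dim, state_size) input contribution
h = h * dA + dBx                             # new state
y = h @ C + D * x                            # (head_dim,) output for this token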
|
class BambaMixer(nn.Module):
'''
Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
There are a few differences between this and Mamba2Mixer:
- The variable use_precomputed_states is slightly different due to the hybrid cache structure
- A few non-obvious bugs with batching in the slow path that exist in main are fixed here
- Some extra variables that our layer doesn't need have been removed
- We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged
'''
def __init__(self, config: BambaConfig, layer_idx: int):
pass
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, seq_idx: Optional[torch.IntTensor]=None):
pass
def torch_forward(self, input_states, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
pass
def forward(self, hidden_states, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, seq_idx: Optional[torch.IntTensor]=None, **kwargs):
pass
| 5
| 1
| 113
| 15
| 81
| 20
| 5
| 0.29
| 1
| 7
| 3
| 0
| 4
| 25
| 4
| 14
| 471
| 65
| 323
| 107
| 300
| 94
| 181
| 89
| 176
| 8
| 1
| 3
| 20
|
692
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaModel
|
import torch
from ...modeling_attn_mask_utils import AttentionMaskConverter
from torch import nn
from typing import Any, Callable, Optional, TypedDict, Union
from ...processing_utils import Unpack
from .configuration_bamba import BambaConfig
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@auto_docstring
class BambaModel(BambaPreTrainedModel):
def __init__(self, config: BambaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
decoder_layers = []
for i in range(config.num_hidden_layers):
decoder_layers.append(BambaDecoderLayer(config, layer_idx=i, layer_type=config.layers_block_type[i]))
self.layers = nn.ModuleList(decoder_layers)
self._attn_implementation = config._attn_implementation
self.final_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = BambaRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[HybridMambaAttentionDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[BambaFlashAttentionKwargs]) -> BaseModelOutputWithPast:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
if use_cache and past_key_values is None:
logger.warning_once('Bamba requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. None was provided, so no cache will be returned.')
if cache_position is None:
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers:
layer_mask = mamba_mask if decoder_layer.layer_type == 'mamba' else causal_mask
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(hidden_states, attention_mask=layer_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = layer_outputs[0]
if output_attentions:
if layer_outputs[1] is not None:
all_self_attns += (layer_outputs[1],)
hidden_states = self.final_layernorm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if past_key_values and (not past_key_values.has_previous_state):
past_key_values.has_previous_state = True
next_cache = None if not use_cache else past_key_values
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns)
def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: HybridMambaAttentionDynamicCache, output_attentions: bool):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
if self.config._attn_implementation == 'sdpa' and (not output_attentions):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[:, :, -sequence_length:, :].to(dtype)
padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
def _update_mamba_mask(self, attention_mask, cache_position):
"""
No need for zeroing states when
1. Cached forward
2. Attending to all inputs
"""
mamba_mask = attention_mask
if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
mamba_mask = None
return mamba_mask
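# --- Illustrative sketch (not part of the library) -------------------------------------
# Toy reconstruction of the 4D additive causal mask built by the static helper above for
# a 3-token prompt with no cache and no padding mask (attention layers receive this mask,
# while mamba layers receive the plain 2D mask or None via _update_mamba_mask). Future
# positions are set to the dtype minimum so softmax assigns them zero probability.
import torch

seq_len, target_len = 3, 3
cache_position = torch.arange(seq_len)
min_val = torch.finfo(torch.float32).min

mask = torch.full((seq_len, target_len), min_val)
mask = torch.triu(mask, diagonal=1)                            # strictly-future slots only
mask *= torch.arange(target_len) > cache_position.reshape(-1, 1)
mask = mask[None, None, :, :]                                  # (batch=1, 1, q_len, kv_len)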
|
@auto_docstring
class BambaModel(BambaPreTrainedModel):
def __init__(self, config: BambaConfig):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[HybridMambaAttentionDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[BambaFlashAttentionKwargs]) -> BaseModelOutputWithPast:
pass
def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: HybridMambaAttentionDynamicCache, output_attentions: bool):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
'''
pass
def _update_mamba_mask(self, attention_mask, cache_position):
'''
No need for zeroing states when
1. Cached forward
2. Attending to all inputs
'''
pass
| 10
| 2
| 38
| 4
| 28
| 6
| 6
| 0.24
| 1
| 14
| 7
| 0
| 6
| 8
| 7
| 8
| 279
| 38
| 196
| 71
| 157
| 47
| 102
| 40
| 94
| 23
| 2
| 3
| 41
|
693
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
import torch
from .configuration_bamba import BambaConfig
@auto_docstring
class BambaPreTrainedModel(PreTrainedModel):
config: BambaConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['BambaDecoderLayer']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_sdpa = True
_is_stateful = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, BambaMixer):
module.dt_bias.data.fill_(1.0)
module.A_log.data = torch.log(torch.arange(1, module.num_heads + 1))
module.D.data.fill_(1.0)
|
@auto_docstring
class BambaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 5
| 0.05
| 1
| 0
| 0
| 2
| 1
| 0
| 1
| 1
| 21
| 1
| 20
| 12
| 18
| 1
| 19
| 12
| 17
| 5
| 1
| 2
| 5
|
694
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaRMSNorm
|
from torch import nn
import torch
from ...integrations import use_kernel_forward_from_hub
@use_kernel_forward_from_hub('RMSNorm')
class BambaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
BambaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
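# --- Illustrative sketch (not part of the library) -------------------------------------
# Quick numerical check of the RMS normalization computed above: each vector is divided
# by the root-mean-square of its elements (plus eps), then scaled by the learned weight.
import torch

x = torch.tensor([[3.0, 4.0]])
eps = 1e-6
rms = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
print(x * rms)   # ~[[0.8485, 1.1314]] = [3, 4] / sqrt(12.5), before the weight scale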
|
@use_kernel_forward_from_hub('RMSNorm')
class BambaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
BambaRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 5
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
695
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaRMSNormGated
|
from torch import nn
import torch
class BambaRMSNormGated(torch.nn.Module):
def __init__(self, hidden_size, eps=1e-06):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states, gate=None):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
if gate is not None:
hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32))
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
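# --- Illustrative sketch (not part of the library) -------------------------------------
# The gated variant above differs from plain RMSNorm only in that the hidden states are
# first modulated by silu(gate) before normalizing; a short toy equivalent:
import torch
import torch.nn.functional as F

x, gate = torch.randn(2, 8), torch.randn(2, 8)
x = x * F.silu(gate)
y = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)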
|
class BambaRMSNormGated(torch.nn.Module):
def __init__(self, hidden_size, eps=1e-06):
pass
def forward(self, hidden_states, gate=None):
pass
| 3
| 0
| 7
| 1
| 6
| 0
| 2
| 0
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 16
| 3
| 13
| 7
| 10
| 0
| 13
| 7
| 10
| 2
| 1
| 1
| 3
|
696
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.BambaRotaryEmbedding
|
from torch import nn
from .configuration_bamba import BambaConfig
import torch
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
class BambaRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: BambaConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
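# --- Illustrative sketch (not part of the library) -------------------------------------
# How the cos/sin tensors returned above are typically consumed: rotary embeddings rotate
# each (x1, x2) channel pair of the query/key by a position-dependent angle via the
# rotate_half trick. The helpers below are illustrative stand-ins for the library's
# apply_rotary_pos_emb, and the partial rotary factor set in the config is ignored here.
import torch

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary(x, cos, sin):
    return x * cos + rotate_half(x) * sin

head_dim, seq = 8, 4
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2).float() / head_dim))
angles = torch.arange(seq).float()[:, None] * inv_freq[None, :]
emb = torch.cat((angles, angles), dim=-1)
cos, sin = emb.cos(), emb.sin()

q = torch.randn(seq, head_dim)
q_rot = apply_rotary(q, cos, sin)   # same shape, position-dependent rotation applied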
|
class BambaRotaryEmbedding(nn.Module):
def __init__(self, config: BambaConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 4
| 1
| 0
| 3
| 7
| 3
| 13
| 59
| 8
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
697
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modeling_bamba.py
|
transformers.models.bamba.modeling_bamba.HybridMambaAttentionDynamicCache
|
from .configuration_bamba import BambaConfig
from typing import Any, Callable, Optional, TypedDict, Union
import torch
class HybridMambaAttentionDynamicCache:
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors; the expected shape of each tensor depends on the layer type.
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
is_compileable = False
def __init__(self, config: BambaConfig, batch_size, dtype=torch.float16, device=None):
self.layers_block_type = config.layers_block_type
self.has_previous_state = False
conv_kernel_size = config.mamba_d_conv
ssm_state_size = config.mamba_d_state
self.conv_states = []
self.ssm_states = []
self.transformer_layers = []
for i in range(config.num_hidden_layers):
if self.layers_block_type[i] == 'mamba':
self.conv_states += [torch.zeros(batch_size, config.mamba_expand * config.hidden_size + 2 * config.mamba_n_groups * ssm_state_size, conv_kernel_size, device=device, dtype=dtype)]
self.ssm_states += [torch.zeros(batch_size, config.mamba_n_heads, config.mamba_d_head, ssm_state_size, device=device, dtype=dtype)]
else:
self.conv_states += [torch.tensor([[]] * batch_size, device=device)]
self.ssm_states += [torch.tensor([[]] * batch_size, device=device)]
self.transformer_layers.append(i)
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]]=None) -> tuple[torch.Tensor, torch.Tensor]:
if self.key_cache[layer_idx].shape[-1] == 0:
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
return (self.key_cache[layer_idx], self.value_cache[layer_idx])
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
for layer_idx in range(len(self.key_cache)):
device = self.key_cache[layer_idx].device
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.value_cache[layer_idx].device
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.conv_states[layer_idx].device
self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
device = self.ssm_states[layer_idx].device
self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
def get_seq_length(self, layer_idx: Optional[int]=0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
if len(self.key_cache) <= layer_idx:
return 0
return self.key_cache[layer_idx].shape[-2]
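A minimal usage sketch for the hybrid cache above, assuming a stand-in config that exposes only the attributes its constructor reads; every number below is an illustrative assumption, not a value from a released Bamba checkpoint.
from types import SimpleNamespace

import torch

from transformers.models.bamba.modeling_bamba import HybridMambaAttentionDynamicCache

# Stand-in config (illustrative values), limited to the fields read by __init__ above.
cfg = SimpleNamespace(
    num_hidden_layers=4,
    layers_block_type=["mamba", "attention", "mamba", "attention"],
    hidden_size=256,
    mamba_expand=2,
    mamba_n_groups=1,
    mamba_n_heads=8,
    mamba_d_head=64,
    mamba_d_conv=4,
    mamba_d_state=16,
)
cache = HybridMambaAttentionDynamicCache(cfg, batch_size=2, dtype=torch.float32)

# Attention layers grow their key/value cache through update(); mamba layers keep
# fixed-size conv_states/ssm_states that the mixer overwrites in place.
layer_idx = cache.transformer_layers[0]  # first attention layer (index 1 here)
new_k = torch.zeros(2, 4, 1, 32)         # (batch, num_heads, new_tokens, head_dim), illustrative
new_v = torch.zeros_like(new_k)
keys, values = cache.update(new_k, new_v, layer_idx)
print(keys.shape[-2], cache.get_seq_length(layer_idx))  # both report 1 cached token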
| null | 5 | 3 | 38 | 2 | 36 | 1 | 3 | 0.32 | 1 | 3 | 1 | 0 | 1 | 7 | 1 | 39 | 52 | 4 | 37 | 12 | 35 | 12 | 19 | 12 | 17 | 3 | 4 | 2 | 3 |
698 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modular_bamba.py | transformers.models.bamba.modular_bamba.BambaAttention |
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaMLP, LlamaRMSNorm, LlamaRotaryEmbedding, rotate_half
class BambaAttention(LlamaAttention):
pass
|
class BambaAttention(LlamaAttention):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
699 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bamba/modular_bamba.py | transformers.models.bamba.modular_bamba.BambaDecoderLayer |
from typing import Optional, TypedDict, Union
from ...processing_utils import Unpack
import torch
from transformers.models.jamba.modeling_jamba import HybridMambaAttentionDynamicCache, JambaAttentionDecoderLayer
from ...utils.deprecation import deprecate_kwarg
from .configuration_bamba import BambaConfig
class BambaDecoderLayer(JambaAttentionDecoderLayer):
def __init__(self, config: BambaConfig, layer_idx: int, layer_type: str='mamba'):
super().__init__(config, layer_idx)
del self.self_attn
num_experts = 1
ffn_layer_class = BambaMLP if num_experts == 1 else None
self.feed_forward = ffn_layer_class(config)
self.layer_type = layer_type
if layer_type == 'mamba':
self.mamba = BambaMixer(config=config, layer_idx=layer_idx)
elif layer_type == 'attention':
self.self_attn = BambaAttention(config, layer_idx)
else:
raise ValueError('Invalid layer_type')
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[HybridMambaAttentionDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[BambaFlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs. Can be used to provide `BambaFlashAttentionKwargs` for
padding-free training and/or improve torch.compile performance.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
if self.layer_type == 'mamba':
hidden_states = self.mamba(hidden_states=hidden_states, cache_params=past_key_values, cache_position=cache_position, attention_mask=attention_mask, **kwargs)
self_attn_weights = None
elif self.layer_type == 'attention':
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
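A short sketch of how a stack of these hybrid layers could be assembled from a per-layer block-type list, mirroring the `layers_block_type` convention used by the cache in the earlier record; this is an assumption about the surrounding model code, not a copy of it.
import torch.nn as nn

def build_hybrid_decoder_layers(config) -> nn.ModuleList:
    # Assumes BambaDecoderLayer (defined above) is in scope and that config exposes a
    # layers_block_type list with one "mamba" or "attention" entry per layer.
    return nn.ModuleList(
        BambaDecoderLayer(config, layer_idx=i, layer_type=block_type)
        for i, block_type in enumerate(config.layers_block_type)
    )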
|
class BambaDecoderLayer(JambaAttentionDecoderLayer):
def __init__(self, config: BambaConfig, layer_idx: int, layer_type: str='mamba'):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[HybridMambaAttentionDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[BambaFlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs. Can be used to provide `BambaFlashAttentionKwargs` for
padding-free training and/or improve torch.compile performance.
'''
pass
| 4 | 1 | 46 | 6 | 28 | 13 | 4 | 0.44 | 1 | 11 | 5 | 0 | 2 | 4 | 2 | 14 | 93 | 12 | 57 | 23 | 43 | 25 | 28 | 12 | 25 | 4 | 2 | 1 | 8 |