Column schema (name, dtype, and observed min/max value or string-length range):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length, nullable ⌀) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
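Below is a minimal sketch of how a dataset with this column schema could be loaded and inspected with the Hugging Face `datasets` library. The dataset id used here is a placeholder (the real repository id is not given in this excerpt), and the filter is only an example of using the metric columns.

```python
from datasets import load_dataset

# Placeholder dataset id -- substitute the actual repository for this corpus.
ds = load_dataset("your-org/class-level-code-metrics", split="train")

# Each record pairs a class implementation with its skeleton and static metrics.
row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:200])

# Example: keep only classes that have docstrings and a high comment-to-code ratio.
documented = ds.filter(
    lambda r: r["total_doc_str"] > 0 and r["CommentToCodeRatio"] > 0.5
)
print(len(documented))
```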
id: 4,400 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForRegressionOutput
human_written_code:
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
from transformers.utils import ModelOutput
from dataclasses import dataclass
import torch.nn as nn
import torch
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSMixerForRegressionOutput`].\n ')
class PatchTSMixerForRegressionOutput(ModelOutput):
"""
loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):
Total loss.
regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Prediction output from the regression head.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):
Backbone embeddings before passing through the head.
hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
"""
loss: Optional[torch.FloatTensor] = None
regression_outputs: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
class_skeleton:
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSMixerForRegressionOutput`].\n ')
class PatchTSMixerForRegressionOutput(ModelOutput):
'''
loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):
Total loss.
regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Prediction output from the regression head.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):
Backbone embeddings before passing through the head.
hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
'''
pass
total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 0 | AvgCountLineBlank: 0 | AvgCountLineCode: 0 | AvgCountLineComment: 0 | AvgCyclomatic: 0 | CommentToCodeRatio: 2.4 | CountClassBase: 1 | CountClassCoupled: 0 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 0 | CountDeclInstanceVariable: 0 | CountDeclMethod: 0 | CountDeclMethodAll: 62 | CountLine: 19 | CountLineBlank: 2 | CountLineCode: 5 | CountLineCodeDecl: 5 | CountLineCodeExe: 4 | CountLineComment: 12 | CountStmt: 5 | CountStmtDecl: 5 | CountStmtExe: 4 | MaxCyclomatic: 0 | MaxInheritanceTree: 4 | MaxNesting: 0 | SumCyclomatic: 0

id: 4,401 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForTimeSeriesClassification
human_written_code:
import torch.nn as nn
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
class PatchTSMixerForTimeSeriesClassification(PatchTSMixerPreTrainedModel):
"""
`PatchTSMixer` for classification application.
Args:
config (`PatchTSMixerConfig`):
Configuration.
Returns:
`None`.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__(config)
self.model = PatchTSMixerModel(config)
self.head = PatchTSMixerLinearHead(config=config)
self.use_return_dict = config.use_return_dict
if config.scaling in ['std', 'mean', True]:
self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches)
else:
self.inject_scale = None
if config.post_init:
self.post_init()
@auto_docstring
def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForTimeSeriesClassificationOutput:
"""
past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
Context values of the time series. For a pretraining task, this denotes the input time series to predict
the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly,
for classification or regression tasks, it denotes the appropriate context values of the time series.
For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is
greater than 1.
target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting,
`(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
Target
values of the time series, that serve as labels for the model. The `target_values` is what the
Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT
required for a pretraining task.
For a forecasting task, the shape is `(batch_size, target_len, num_input_channels)`. Even if we want
to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter,
pass the target data with all channels, as channel filtering for both prediction and target will be
manually applied before the loss computation.
For a classification task, it has a shape of `(batch_size,)`.
For a regression task, it has a shape of `(batch_size, num_targets)`.
return_loss (`bool`, *optional*):
Whether to return the loss in the `forward` call.
"""
loss = torch.nn.CrossEntropyLoss()
return_dict = return_dict if return_dict is not None else self.use_return_dict
model_output = self.model(past_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
if isinstance(model_output, tuple):
model_output = PatchTSMixerModelOutput(*model_output)
if self.inject_scale is not None:
model_output.last_hidden_state = self.inject_scale(model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale)
y_hat = self.head(model_output.last_hidden_state)
if target_values is not None and return_loss is True:
loss_val = loss(y_hat, target_values)
else:
loss_val = None
if not return_dict:
return tuple((v for v in [loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states]))
return PatchTSMixerForTimeSeriesClassificationOutput(loss=loss_val, prediction_outputs=y_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states)
class_skeleton:
class PatchTSMixerForTimeSeriesClassification(PatchTSMixerPreTrainedModel):
'''
`PatchTSMixer` for classification application.
Args:
config (`PatchTSMixerConfig`):
Configuration.
Returns:
`None`.
'''
def __init__(self, config: PatchTSMixerConfig):
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForTimeSeriesClassificationOutput:
'''
past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
Context values of the time series. For a pretraining task, this denotes the input time series to predict
the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly,
for classification or regression tasks, it denotes the appropriate context values of the time series.
For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is
greater than 1.
target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting,
`(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
Target
values of the time series, that serve as labels for the model. The `target_values` is what the
Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT
required for a pretraining task.
For a forecasting task, the shape is `(batch_size, target_len, num_input_channels)`. Even if we want
to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter,
pass the target data with all channels, as channel filtering for both prediction and target will be
manually applied before the loss computation.
For a classification task, it has a shape of `(batch_size,)`.
For a regression task, it has a shape of `(batch_size, num_targets)`.
return_loss (`bool`, *optional*):
Whether to return the loss in the `forward` call.
'''
pass
total_program_units: 4 | total_doc_str: 2 | AvgCountLine: 45 | AvgCountLineBlank: 8 | AvgCountLineCode: 29 | AvgCountLineComment: 11 | AvgCyclomatic: 5 | CommentToCodeRatio: 0.48 | CountClassBase: 1 | CountClassCoupled: 10 | CountClassCoupledModified: 6 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 4 | CountDeclMethod: 2 | CountDeclMethodAll: 130 | CountLine: 107 | CountLineBlank: 19 | CountLineCode: 63 | CountLineCodeDecl: 19 | CountLineCodeExe: 48 | CountLineComment: 30 | CountStmt: 26 | CountStmtDecl: 11 | CountStmtExe: 23 | MaxCyclomatic: 6 | MaxInheritanceTree: 3 | MaxNesting: 1 | SumCyclomatic: 9

id: 4,402 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForTimeSeriesClassificationOutput
human_written_code:
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
from transformers.utils import ModelOutput
from dataclasses import dataclass
import torch.nn as nn
import torch
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSMixerForTimeSeriesClassificationOutput`].\n ')
class PatchTSMixerForTimeSeriesClassificationOutput(ModelOutput):
"""
loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):
Total loss.
prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
Prediction output from the classification head.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):
Backbone embeddings before passing through the head.
hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
"""
loss: Optional[torch.FloatTensor] = None
prediction_outputs: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
class_skeleton:
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSMixerForTimeSeriesClassificationOutput`].\n ')
class PatchTSMixerForTimeSeriesClassificationOutput(ModelOutput):
'''
loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):
Total loss.
prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
Prediction output from the classification head.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):
Backbone embeddings before passing through the head.
hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
'''
pass
total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 0 | AvgCountLineBlank: 0 | AvgCountLineCode: 0 | AvgCountLineComment: 0 | AvgCyclomatic: 0 | CommentToCodeRatio: 2.4 | CountClassBase: 1 | CountClassCoupled: 0 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 0 | CountDeclInstanceVariable: 0 | CountDeclMethod: 0 | CountDeclMethodAll: 62 | CountLine: 19 | CountLineBlank: 2 | CountLineCode: 5 | CountLineCodeDecl: 5 | CountLineCodeExe: 4 | CountLineComment: 12 | CountStmt: 5 | CountStmtDecl: 5 | CountStmtExe: 4 | MaxCyclomatic: 0 | MaxInheritanceTree: 4 | MaxNesting: 0 | SumCyclomatic: 0

id: 4,403 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerGatedAttention
human_written_code:
import torch.nn as nn
class PatchTSMixerGatedAttention(nn.Module):
"""
Module that applies gated attention to input data.
Args:
in_size (`int`): The input size.
out_size (`int`): The output size.
"""
def __init__(self, in_size: int, out_size: int):
super().__init__()
self.attn_layer = nn.Linear(in_size, out_size)
self.attn_softmax = nn.Softmax(dim=-1)
def forward(self, inputs):
attn_weight = self.attn_softmax(self.attn_layer(inputs))
inputs = inputs * attn_weight
return inputs
class_skeleton:
class PatchTSMixerGatedAttention(nn.Module):
'''
Module that applies gated attention to input data.
Args:
in_size (`int`): The input size.
out_size (`int`): The output size.
'''
def __init__(self, in_size: int, out_size: int):
pass
def forward(self, inputs):
pass
total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 4 | AvgCountLineBlank: 0 | AvgCountLineCode: 4 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.67 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 2 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 18 | CountLineBlank: 3 | CountLineCode: 9 | CountLineCodeDecl: 6 | CountLineCodeExe: 6 | CountLineComment: 6 | CountStmt: 9 | CountStmtDecl: 6 | CountStmtExe: 6 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2

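A minimal usage sketch for the gated-attention block in this row, assuming `PatchTSMixerGatedAttention` can be imported from the module path given in `file_path`; the tensor sizes are illustrative only. It shows that the block reweights the input with a softmax over the last dimension and preserves the input shape.

```python
import torch
from transformers.models.patchtsmixer.modeling_patchtsmixer import PatchTSMixerGatedAttention

gate = PatchTSMixerGatedAttention(in_size=8, out_size=8)
x = torch.randn(2, 5, 8)   # (batch, num_patches, d_model)
out = gate(x)              # x * softmax(Linear(x)), applied along the last dimension
print(out.shape)           # torch.Size([2, 5, 8]) -- same shape as the input
```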
id: 4,404 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerLayer
human_written_code:
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
import torch
class PatchTSMixerLayer(nn.Module):
"""
The `PatchTSMixer` layer that does all three kinds of mixing.
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.patch_mixer = PatchMixerBlock(config=config)
self.feature_mixer = FeatureMixerBlock(config=config)
self.mode = config.mode
if config.mode == 'mix_channel':
self.channel_feature_mixer = PatchTSMixerChannelFeatureMixerBlock(config=config)
def forward(self, hidden: torch.Tensor):
"""
Args:
hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):
Input tensor to the layer.
Returns:
`torch.Tensor`: Transformed tensor.
"""
if self.mode == 'mix_channel':
hidden = self.channel_feature_mixer(hidden)
hidden = self.patch_mixer(hidden)
hidden = self.feature_mixer(hidden)
return hidden
class_skeleton:
class PatchTSMixerLayer(nn.Module):
'''
The `PatchTSMixer` layer that does all three kinds of mixing.
Args:
config (`PatchTSMixerConfig`):
Configuration.
'''
def __init__(self, config: PatchTSMixerConfig):
pass
def forward(self, hidden: torch.Tensor):
'''
Args:
hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):
Input tensor to the layer.
Returns:
`torch.Tensor`: Transformed tensor.
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 13 | AvgCountLineBlank: 3 | AvgCountLineCode: 7 | AvgCountLineComment: 4 | AvgCyclomatic: 2 | CommentToCodeRatio: 1 | CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 4 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 4 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 36 | CountLineBlank: 9 | CountLineCode: 14 | CountLineCodeDecl: 7 | CountLineCodeExe: 11 | CountLineComment: 14 | CountStmt: 14 | CountStmtDecl: 7 | CountStmtExe: 11 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4

id: 4,405 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerLinearHead
human_written_code:
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
class PatchTSMixerLinearHead(nn.Module):
"""Linear head for Classification and Regression.
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig, distribution_output=None):
super().__init__()
self.head_aggregation = config.head_aggregation
self.output_range = config.output_range
if config.head_aggregation is None:
mul_factor = config.num_patches
else:
mul_factor = 1
self.distribution_output = distribution_output
if distribution_output is None:
self.projection = nn.Linear(config.d_model * config.num_input_channels * mul_factor, config.num_targets)
else:
self.projection = distribution_output.get_parameter_projection(config.d_model * config.num_input_channels * mul_factor)
if config.head_aggregation is None:
self.flatten = nn.Flatten(start_dim=-3)
else:
self.flatten = nn.Flatten(start_dim=-2)
self.dropout = nn.Dropout(config.head_dropout)
def forward(self, hidden_features):
"""
Args:
hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode
or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden
features.
Returns:
`torch.Tensor` of shape `(batch_size x num_targets)`.
"""
hidden_features = hidden_features.transpose(-1, -2)
if self.head_aggregation == 'use_last':
hidden_features = hidden_features[..., -1]
elif self.head_aggregation == 'max_pool':
hidden_features = hidden_features.max(dim=-1).values
elif self.head_aggregation == 'avg_pool':
hidden_features = hidden_features.mean(dim=-1)
if self.flatten:
hidden_features = self.flatten(hidden_features)
hidden_features = self.dropout(hidden_features)
hidden_features = self.projection(hidden_features)
if self.distribution_output is None and self.output_range is not None:
hidden_features = torch.sigmoid(hidden_features) * (self.output_range[1] - self.output_range[0]) + self.output_range[0]
return hidden_features
class_skeleton:
class PatchTSMixerLinearHead(nn.Module):
'''Linear head for Classification and Regression.
Args:
config (`PatchTSMixerConfig`):
Configuration.
'''
def __init__(self, config: PatchTSMixerConfig, distribution_output=None):
pass
def forward(self, hidden_features):
'''
Args:
hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode
or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden
features.
Returns:
`torch.Tensor` of shape `(batch_size x num_targets)`.
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 30 | AvgCountLineBlank: 4 | AvgCountLineCode: 20 | AvgCountLineComment: 7 | AvgCyclomatic: 5 | CommentToCodeRatio: 0.44 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 6 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 69 | CountLineBlank: 11 | CountLineCode: 41 | CountLineCodeDecl: 10 | CountLineCodeExe: 38 | CountLineComment: 18 | CountStmt: 29 | CountStmtDecl: 10 | CountStmtExe: 26 | MaxCyclomatic: 6 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 10

id: 4,406 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerMLP
human_written_code:
import torch.nn as nn
import torch
class PatchTSMixerMLP(nn.Module):
def __init__(self, in_features, out_features, config):
super().__init__()
num_hidden = in_features * config.expansion_factor
self.fc1 = nn.Linear(in_features, num_hidden)
self.dropout1 = nn.Dropout(config.dropout)
self.fc2 = nn.Linear(num_hidden, out_features)
self.dropout2 = nn.Dropout(config.dropout)
def forward(self, inputs: torch.Tensor):
"""
Args:
inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):
Input to the MLP layer.
Returns:
`torch.Tensor` of the same shape as `inputs`
"""
inputs = self.dropout1(nn.functional.gelu(self.fc1(inputs)))
inputs = self.fc2(inputs)
inputs = self.dropout2(inputs)
return inputs
class_skeleton:
class PatchTSMixerMLP(nn.Module):
def __init__(self, in_features, out_features, config):
pass
def forward(self, inputs: torch.Tensor):
'''
Args:
inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):
Input to the MLP layer.
Returns:
`torch.Tensor` of the same shape as `inputs`
'''
pass
total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 10 | AvgCountLineBlank: 0 | AvgCountLineCode: 6 | AvgCountLineComment: 4 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.54 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 4 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 21 | CountLineBlank: 1 | CountLineCode: 13 | CountLineCodeDecl: 8 | CountLineCodeExe: 10 | CountLineComment: 7 | CountStmt: 13 | CountStmtDecl: 8 | CountStmtExe: 10 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2

id: 4,407 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerMasking
human_written_code:
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
class PatchTSMixerMasking(nn.Module):
"""
Class to perform random or forecast masking.
Parameters:
config (`PatchTSMixerConfig`): model config
Returns:
x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.random_mask_ratio = config.random_mask_ratio
self.channel_consistent_masking = config.channel_consistent_masking
self.mask_type = config.mask_type
self.num_forecast_mask_patches = config.num_forecast_mask_patches
self.unmasked_channel_indices = config.unmasked_channel_indices
self.mask_value = config.mask_value
if self.unmasked_channel_indices is not None:
self.unmasked_channel_indices = sorted(self.unmasked_channel_indices)
def forward(self, patch_input: torch.Tensor):
"""
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input
Return:
masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
"""
if self.mask_type == 'random':
masked_input, mask = random_masking(inputs=patch_input, mask_ratio=self.random_mask_ratio, unmasked_channel_indices=self.unmasked_channel_indices, channel_consistent_masking=self.channel_consistent_masking, mask_value=self.mask_value)
elif self.mask_type == 'forecast':
masked_input, mask = forecast_masking(inputs=patch_input, num_forecast_mask_patches=self.num_forecast_mask_patches, unmasked_channel_indices=self.unmasked_channel_indices, mask_value=self.mask_value)
else:
raise ValueError(f'Invalid mask type {self.mask_type}.')
mask = mask.bool()
return (masked_input, mask)
class_skeleton:
class PatchTSMixerMasking(nn.Module):
'''
Class to perform random or forecast masking.
Parameters:
config (`PatchTSMixerConfig`): model config
Returns:
x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
'''
def __init__(self, config: PatchTSMixerConfig):
pass
def forward(self, patch_input: torch.Tensor):
'''
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input
Return:
masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 22 | AvgCountLineBlank: 2 | AvgCountLineCode: 15 | AvgCountLineComment: 6 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.68 | CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 6 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 58 | CountLineBlank: 6 | CountLineCode: 31 | CountLineCodeDecl: 10 | CountLineCodeExe: 28 | CountLineComment: 21 | CountStmt: 18 | CountStmtDecl: 10 | CountStmtExe: 15 | MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 5

id: 4,408 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerMeanScaler
human_written_code:
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
class PatchTSMixerMeanScaler(nn.Module):
"""
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-10
self.default_scale = config.default_scale if hasattr(config, 'default_scale') else None
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
num_observed = observed_indicator.sum(self.dim, keepdim=True)
scale = ts_sum / torch.clamp(num_observed, min=1)
if self.default_scale is None:
batch_sum = ts_sum.sum(dim=0)
batch_observations = torch.clamp(num_observed.sum(0), min=1)
default_scale = torch.squeeze(batch_sum / batch_observations)
else:
default_scale = self.default_scale * torch.ones_like(scale)
scale = torch.where(num_observed > 0, scale, default_scale)
scale = torch.clamp(scale, min=self.minimum_scale)
scaled_data = data / scale
if not self.keepdim:
scale = scale.squeeze(dim=self.dim)
return (scaled_data, torch.zeros_like(scale), scale)
class_skeleton:
class PatchTSMixerMeanScaler(nn.Module):
'''
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
'''
def __init__(self, config: PatchTSMixerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 23 | AvgCountLineBlank: 3 | AvgCountLineCode: 12 | AvgCountLineComment: 8 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.76 | CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 4 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 52 | CountLineBlank: 8 | CountLineCode: 25 | CountLineCodeDecl: 16 | CountLineCodeExe: 20 | CountLineComment: 19 | CountStmt: 22 | CountStmtDecl: 14 | CountStmtExe: 19 | MaxCyclomatic: 5 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 8

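A toy, hand-computed illustration of the scale produced by the `forward` above (weighted mean absolute value over the sequence dimension, `dim=1`), written with plain tensor ops rather than the module itself; the numbers are illustrative and the `default_scale`/`minimum_scale` branches are omitted.

```python
import torch

data = torch.tensor([[[1.0], [-2.0], [3.0], [0.0]]])      # (batch=1, seq_len=4, channels=1)
observed = torch.tensor([[[1.0], [1.0], [1.0], [0.0]]])    # last step is missing

ts_sum = (data * observed).abs().sum(1, keepdim=True)      # |1| + |-2| + |3| = 6
num_observed = observed.sum(1, keepdim=True)               # 3 observed steps
scale = ts_sum / torch.clamp(num_observed, min=1)          # 6 / 3 = 2.0
print(scale.squeeze(), (data / scale).squeeze())           # 2.0 and the scaled series
```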
id: 4,409 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerModel
human_written_code:
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
import torch.nn as nn
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
@auto_docstring(custom_intro='\n The PatchTSMixer Model for time-series forecasting.\n ')
class PatchTSMixerModel(PatchTSMixerPreTrainedModel):
def __init__(self, config: PatchTSMixerConfig, mask_input: bool=False):
"""
mask_input (bool, *optional*, defaults to `False`):
Whether to mask the input using the [`PatchTSMixerMasking`] module.
"""
super().__init__(config)
self.use_return_dict = config.use_return_dict
self.encoder = PatchTSMixerEncoder(config)
self.patching = PatchTSMixerPatchify(config)
if mask_input is True:
self.masking = PatchTSMixerMasking(config)
else:
self.masking = None
if config.scaling == 'mean':
self.scaler = PatchTSMixerMeanScaler(config)
elif config.scaling == 'std' or config.scaling is True:
self.scaler = PatchTSMixerStdScaler(config)
else:
self.scaler = PatchTSMixerNOPScaler(config)
if config.post_init:
self.post_init()
@auto_docstring
def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=None) -> PatchTSMixerModelOutput:
"""
past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
Context values of the time series. For a pretraining task, this denotes the input time series to predict
the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly,
for classification or regression tasks, it denotes the appropriate context values of the time series.
For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is
greater than 1.
observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
"""
return_dict = return_dict if return_dict is not None else self.use_return_dict
mask = None
if observed_mask is None:
observed_mask = torch.ones_like(past_values)
scaled_past_values, loc, scale = self.scaler(past_values, observed_mask)
patched_x = self.patching(scaled_past_values)
enc_input = patched_x
if self.masking is not None:
enc_input, mask = self.masking(patched_x)
encoder_output = self.encoder(enc_input, output_hidden_states=output_hidden_states, return_dict=return_dict)
if isinstance(encoder_output, tuple):
encoder_output = PatchTSMixerEncoderOutput(*encoder_output)
if not return_dict:
return tuple((v for v in [encoder_output.last_hidden_state, encoder_output.hidden_states, patched_x, mask, loc, scale]))
return PatchTSMixerModelOutput(last_hidden_state=encoder_output.last_hidden_state, hidden_states=encoder_output.hidden_states, patch_input=patched_x, mask=mask, loc=loc, scale=scale)
class_skeleton:
@auto_docstring(custom_intro='\n The PatchTSMixer Model for time-series forecasting.\n ')
class PatchTSMixerModel(PatchTSMixerPreTrainedModel):
def __init__(self, config: PatchTSMixerConfig, mask_input: bool=False):
'''
mask_input (bool, *optional*, defaults to `False`):
Whether to mask the input using the [`PatchTSMixerMasking`] module.
'''
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=None) -> PatchTSMixerModelOutput:
'''
past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
Context values of the time series. For a pretraining task, this denotes the input time series to predict
the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly,
for classification or regression tasks, it denotes the appropriate context values of the time series.
For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is
greater than 1.
observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
'''
pass
total_program_units: 5 | total_doc_str: 2 | AvgCountLine: 42 | AvgCountLineBlank: 7 | AvgCountLineCode: 30 | AvgCountLineComment: 6 | AvgCyclomatic: 6 | CommentToCodeRatio: 0.19 | CountClassBase: 1 | CountClassCoupled: 13 | CountClassCoupledModified: 9 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 5 | CountDeclMethod: 2 | CountDeclMethodAll: 130 | CountLine: 88 | CountLineBlank: 14 | CountLineCode: 63 | CountLineCodeDecl: 20 | CountLineCodeExe: 52 | CountLineComment: 12 | CountStmt: 31 | CountStmtDecl: 13 | CountStmtExe: 28 | MaxCyclomatic: 6 | MaxInheritanceTree: 3 | MaxNesting: 1 | SumCyclomatic: 11

id: 4,410 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerNOPScaler
human_written_code:
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
from typing import Callable, Optional, Union
import torch
class PatchTSMixerNOPScaler(nn.Module):
"""
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
return (data, loc, scale)
class_skeleton:
class PatchTSMixerNOPScaler(nn.Module):
'''
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
'''
def __init__(self, config: PatchTSMixerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 10 | AvgCountLineBlank: 0 | AvgCountLineCode: 5 | AvgCountLineComment: 5 | AvgCyclomatic: 2 | CommentToCodeRatio: 1.09 | CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 2 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 25 | CountLineBlank: 2 | CountLineCode: 11 | CountLineCodeDecl: 9 | CountLineCodeExe: 6 | CountLineComment: 12 | CountStmt: 9 | CountStmtDecl: 7 | CountStmtExe: 6 | MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 4

id: 4,411 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerNormLayer
human_written_code:
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
class PatchTSMixerNormLayer(nn.Module):
"""Normalization block
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.norm_mlp = config.norm_mlp
if 'batch' in config.norm_mlp.lower():
self.norm = PatchTSMixerBatchNorm(config)
else:
self.norm = nn.LayerNorm(config.d_model, eps=config.norm_eps)
def forward(self, inputs: torch.Tensor):
"""
Args:
inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):
Input to the normalization layer.
Returns:
`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`
"""
if 'batch' in self.norm_mlp.lower():
inputs_reshaped = torch.reshape(inputs, (inputs.shape[0] * inputs.shape[1], inputs.shape[2], inputs.shape[3]))
inputs_reshaped = self.norm(inputs_reshaped)
inputs = torch.reshape(inputs_reshaped, inputs.shape)
else:
inputs = self.norm(inputs)
return inputs
class_skeleton:
class PatchTSMixerNormLayer(nn.Module):
'''Normalization block
Args:
config (`PatchTSMixerConfig`):
Configuration.
'''
def __init__(self, config: PatchTSMixerConfig):
pass
def forward(self, inputs: torch.Tensor):
'''
Args:
inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):
Input to the normalization layer.
Returns:
`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 19 | AvgCountLineBlank: 3 | AvgCountLineCode: 11 | AvgCountLineComment: 6 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.7 | CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 2 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 47 | CountLineBlank: 9 | CountLineCode: 23 | CountLineCodeDecl: 6 | CountLineCodeExe: 20 | CountLineComment: 16 | CountStmt: 14 | CountStmtDecl: 6 | CountStmtExe: 11 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4

id: 4,412 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerPatchify
human_written_code:
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
import torch
class PatchTSMixerPatchify(nn.Module):
"""
A class to patchify the time series sequence into different patches
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.sequence_length = config.context_length
self.patch_length = config.patch_length
self.patch_stride = config.patch_stride
if self.sequence_length <= self.patch_length:
raise ValueError(f'Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})')
self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1
new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1)
self.sequence_start = self.sequence_length - new_sequence_length
def forward(self, past_values: torch.Tensor):
"""
Parameters:
past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*):
Input for patchification
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
"""
sequence_length = past_values.shape[-2]
if sequence_length != self.sequence_length:
raise ValueError(f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length}).")
output = past_values[:, self.sequence_start:, :]
output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride)
output = output.transpose(-2, -3).contiguous()
return output
class_skeleton:
class PatchTSMixerPatchify(nn.Module):
'''
A class to patchify the time series sequence into different patches
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
'''
def __init__(self, config: PatchTSMixerConfig):
pass
def forward(self, past_values: torch.Tensor):
'''
Parameters:
past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*):
Input for patchification
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 19 | AvgCountLineBlank: 2 | AvgCountLineCode: 11 | AvgCountLineComment: 6 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.7 | CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 5 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 46 | CountLineBlank: 7 | CountLineCode: 23 | CountLineCodeDecl: 11 | CountLineCodeExe: 20 | CountLineComment: 16 | CountStmt: 19 | CountStmtDecl: 11 | CountStmtExe: 16 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4

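A worked example of the patch arithmetic and the `unfold` call used in `PatchTSMixerPatchify.forward` above, with illustrative sizes (`context_length=16`, `patch_length=4`, `patch_stride=4`).

```python
import torch

seq_len, patch_len, stride = 16, 4, 4
num_patches = (max(seq_len, patch_len) - patch_len) // stride + 1   # (16 - 4) // 4 + 1 = 4
new_seq_len = patch_len + stride * (num_patches - 1)                # 16, so sequence_start = 0

past_values = torch.randn(2, seq_len, 3)                            # (batch, seq_len, num_channels)
out = past_values[:, seq_len - new_seq_len:, :]
out = out.unfold(dimension=-2, size=patch_len, step=stride)         # (batch, num_patches, channels, patch_len)
out = out.transpose(-2, -3).contiguous()                            # (batch, channels, num_patches, patch_len)
print(num_patches, out.shape)                                       # 4 torch.Size([2, 3, 4, 4])
```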
id: 4,413 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerPositionalEncoding
human_written_code:
import math
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
import torch
class PatchTSMixerPositionalEncoding(nn.Module):
"""
Class for positional encoding
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
if config.use_positional_encoding:
self.position_enc = self._init_pe(config)
else:
self.position_enc = nn.Parameter(torch.zeros(config.num_patches, config.d_model))
@staticmethod
def _init_pe(config: PatchTSMixerConfig) -> nn.Parameter:
if config.positional_encoding_type == 'random':
position_enc = nn.Parameter(torch.randn(config.num_patches, config.d_model), requires_grad=True)
elif config.positional_encoding_type == 'sincos':
position_enc = torch.zeros(config.num_patches, config.d_model)
position = torch.arange(0, config.num_patches).unsqueeze(1)
div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model))
position_enc[:, 0::2] = torch.sin(position * div_term)
position_enc[:, 1::2] = torch.cos(position * div_term)
position_enc = position_enc - position_enc.mean()
position_enc = position_enc / (position_enc.std() * 10)
position_enc = nn.Parameter(position_enc, requires_grad=False)
else:
raise ValueError(f"{config.positional_encoding_type} is not a valid positional encoder. Available types are 'random' and 'sincos'.")
return position_enc
def forward(self, patch_input: torch.Tensor):
hidden_state = patch_input + self.position_enc
return hidden_state
class_skeleton:
class PatchTSMixerPositionalEncoding(nn.Module):
'''
Class for positional encoding
'''
def __init__(self, config: PatchTSMixerConfig):
pass
@staticmethod
def _init_pe(config: PatchTSMixerConfig) -> nn.Parameter:
pass
def forward(self, patch_input: torch.Tensor):
pass
total_program_units: 5 | total_doc_str: 1 | AvgCountLine: 10 | AvgCountLineBlank: 0 | AvgCountLineCode: 9 | AvgCountLineComment: 1 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.21 | CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 1 | CountDeclMethod: 3 | CountDeclMethodAll: 13 | CountLine: 37 | CountLineBlank: 3 | CountLineCode: 28 | CountLineCodeDecl: 10 | CountLineCodeExe: 23 | CountLineComment: 6 | CountStmt: 22 | CountStmtDecl: 9 | CountStmtExe: 18 | MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 6

id: 4,414 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerPreTrainedModel
human_written_code:
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
from transformers.modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
@auto_docstring
class PatchTSMixerPreTrainedModel(PreTrainedModel):
config: PatchTSMixerConfig
base_model_prefix = 'model'
main_input_name = 'past_values'
supports_gradient_checkpointing = False
def _init_weights(self, module):
"""Initialize weights"""
if isinstance(module, PatchTSMixerPositionalEncoding):
if self.config.positional_encoding_type == 'random':
nn.init.normal_(module.position_enc, mean=0.0, std=0.1)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, PatchTSMixerBatchNorm):
module.batchnorm.bias.data.zero_()
module.batchnorm.weight.data.fill_(1.0)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.bias is not None:
module.bias.data.zero_()
class_skeleton:
@auto_docstring
class PatchTSMixerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize weights'''
pass
total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 16 | AvgCountLineBlank: 0 | AvgCountLineCode: 14 | AvgCountLineComment: 2 | AvgCyclomatic: 7 | CommentToCodeRatio: 0.16 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 2 | CountClassDerived: 6 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 0 | CountDeclMethod: 1 | CountDeclMethodAll: 128 | CountLine: 23 | CountLineBlank: 1 | CountLineCode: 19 | CountLineCodeDecl: 6 | CountLineCodeExe: 17 | CountLineComment: 3 | CountStmt: 16 | CountStmtDecl: 6 | CountStmtExe: 14 | MaxCyclomatic: 7 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 7

id: 4,415 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerPretrainHead
human_written_code:
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
class PatchTSMixerPretrainHead(nn.Module):
"""Pretraining head.
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.dropout_layer = nn.Dropout(config.head_dropout)
self.base_pt_block = nn.Linear(config.d_model, config.patch_length)
def forward(self, hidden_features):
"""
Args:
hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode
or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden
features.
Returns:
`torch.Tensor` of shape `(batch_size x n_vars x num_patch x patch_length)`.
"""
hidden_features = self.dropout_layer(hidden_features)
forecast = self.base_pt_block(hidden_features)
return forecast
class_skeleton:
class PatchTSMixerPretrainHead(nn.Module):
'''Pretraining head.
Args:
config (`PatchTSMixerConfig`):
Configuration.
'''
def __init__(self, config: PatchTSMixerConfig):
pass
def forward(self, hidden_features):
'''
Args:
hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode
or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden
features.
Returns:
`torch.Tensor` of shape `(batch_size x n_vars x num_patch x patch_length)`.
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 10 | AvgCountLineBlank: 2 | AvgCountLineCode: 4 | AvgCountLineComment: 5 | AvgCyclomatic: 1 | CommentToCodeRatio: 1.56 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 2 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 28 | CountLineBlank: 6 | CountLineCode: 9 | CountLineCodeDecl: 6 | CountLineCodeExe: 6 | CountLineComment: 14 | CountStmt: 9 | CountStmtDecl: 6 | CountStmtExe: 6 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2

id: 4,416 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | class_name: transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerStdScaler
human_written_code:
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn
class PatchTSMixerStdScaler(nn.Module):
"""
Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by
subtracting from the mean and dividing by the standard deviation.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-05
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
denominator = denominator.clamp_min(1.0)
loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
scale = torch.sqrt(variance + self.minimum_scale)
return ((data - loc) / scale, loc, scale)
class_skeleton:
class PatchTSMixerStdScaler(nn.Module):
'''
Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by
subtracting from the mean and dividing by the standard deviation.
'''
def __init__(self, config: PatchTSMixerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 13 | AvgCountLineBlank: 1 | AvgCountLineCode: 7 | AvgCountLineComment: 6 | AvgCyclomatic: 3 | CommentToCodeRatio: 1 | CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 33 | CountLineBlank: 3 | CountLineCode: 15 | CountLineCodeDecl: 12 | CountLineCodeExe: 10 | CountLineComment: 15 | CountStmt: 13 | CountStmtDecl: 10 | CountStmtExe: 10 | MaxCyclomatic: 4 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 5

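A toy illustration of the standardization performed by the `forward` above (`dim=1`, `keepdim=True`, `minimum_scale=1e-5`), reproduced with plain tensor ops on a fully observed univariate series; the values are illustrative only.

```python
import torch

data = torch.tensor([[[1.0], [2.0], [3.0], [4.0]]])        # (batch=1, seq_len=4, channels=1)
observed = torch.ones_like(data)                            # everything observed

denom = observed.sum(1, keepdim=True).clamp_min(1.0)        # 4 observed steps
loc = (data * observed).sum(1, keepdim=True) / denom        # mean = 2.5
var = (((data - loc) * observed) ** 2).sum(1, keepdim=True) / denom
scale = torch.sqrt(var + 1e-5)                              # std ~= 1.118
print(loc.squeeze(), scale.squeeze(), ((data - loc) / scale).squeeze())
```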
id: 4,417 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/configuration_patchtst.py | class_name: transformers.models.patchtst.configuration_patchtst.PatchTSTConfig
human_written_code:
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, Union
class PatchTSTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`PatchTSTModel`]. It is used to instantiate a
PatchTST model according to the specified arguments, defining the model architecture.
[ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_input_channels (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
context_length (`int`, *optional*, defaults to 32):
The context length of the input sequence.
distribution_output (`str`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
"negative_binomial".
loss (`str`, *optional*, defaults to `"mse"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
error "mse".
patch_length (`int`, *optional*, defaults to 1):
Define the patch length of the patchification process.
patch_stride (`int`, *optional*, defaults to 1):
Define the stride of the patchification process.
num_hidden_layers (`int`, *optional*, defaults to 3):
Number of hidden layers.
d_model (`int`, *optional*, defaults to 128):
Dimensionality of the transformer layers.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
share_embedding (`bool`, *optional*, defaults to `True`):
Sharing the input embedding across all channels.
channel_attention (`bool`, *optional*, defaults to `False`):
Activate channel attention block in the Transformer to allow channels to attend each other.
ffn_dim (`int`, *optional*, defaults to 512):
Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
norm_type (`str` , *optional*, defaults to `"batchnorm"`):
Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`.
norm_eps (`float`, *optional*, defaults to 1e-05):
A value added to the denominator for numerical stability of normalization.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for the attention probabilities.
positional_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability in the positional embedding layer.
path_dropout (`float`, *optional*, defaults to 0.0):
The dropout path in the residual block.
ff_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability used between the two layers of the feed-forward networks.
bias (`bool`, *optional*, defaults to `True`):
Whether to add bias in the feed-forward networks.
activation_function (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (string) in the Transformer.`"gelu"` and `"relu"` are supported.
pre_norm (`bool`, *optional*, defaults to `True`):
Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is
applied after residual block.
positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
Positional encodings. Options `"random"` and `"sincos"` are supported.
use_cls_token (`bool`, *optional*, defaults to `False`):
Whether cls token is used.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
share_projection (`bool`, *optional*, defaults to `True`):
Sharing the projection layer across different channels in the forecast head.
scaling (`Union`, *optional*, defaults to `"std"`):
Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
scaler is set to "mean".
do_mask_input (`bool`, *optional*):
Apply masking during the pretraining.
mask_type (`str`, *optional*, defaults to `"random"`):
Masking type. Only `"random"` and `"forecast"` are currently supported.
random_mask_ratio (`float`, *optional*, defaults to 0.5):
Masking ratio applied to mask the input data during random pretraining.
num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`):
Number of patches to be masked at the end of each batch sample. If it is an integer,
all the samples in the batch will have the same number of masked patches. If it is a list,
samples in the batch will be randomly masked by numbers defined in the list. This argument is only used
for forecast pretraining.
channel_consistent_masking (`bool`, *optional*, defaults to `False`):
If channel consistent masking is True, all the channels will have the same masking pattern.
unmasked_channel_indices (`list`, *optional*):
Indices of channels that are not masked during pretraining. Values in the list are number between 1 and
`num_input_channels`
mask_value (`int`, *optional*, defaults to 0):
Values in the masked patches will be filled by `mask_value`.
pooling_type (`str`, *optional*, defaults to `"mean"`):
Pooling of the embedding. `"mean"`, `"max"` and `None` are supported.
head_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for head.
prediction_length (`int`, *optional*, defaults to 24):
The prediction horizon that the model will output.
num_targets (`int`, *optional*, defaults to 1):
Number of targets for regression and classification tasks. For classification, it is the number of
classes.
output_range (`list`, *optional*):
Output range for regression task. The range of output values can be set to enforce the model to produce
values within a range.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples is generated in parallel for probabilistic prediction.
```python
>>> from transformers import PatchTSTConfig, PatchTSTModel
>>> # Initializing an PatchTST configuration with 12 time steps for prediction
>>> configuration = PatchTSTConfig(prediction_length=12)
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = PatchTSTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'patchtst'
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_attention_heads', 'num_hidden_layers': 'num_hidden_layers'}
def __init__(self, num_input_channels: int=1, context_length: int=32, distribution_output: str='student_t', loss: str='mse', patch_length: int=1, patch_stride: int=1, num_hidden_layers: int=3, d_model: int=128, num_attention_heads: int=4, share_embedding: bool=True, channel_attention: bool=False, ffn_dim: int=512, norm_type: str='batchnorm', norm_eps: float=1e-05, attention_dropout: float=0.0, positional_dropout: float=0.0, path_dropout: float=0.0, ff_dropout: float=0.0, bias: bool=True, activation_function: str='gelu', pre_norm: bool=True, positional_encoding_type: str='sincos', use_cls_token: bool=False, init_std: float=0.02, share_projection: bool=True, scaling: Optional[Union[str, bool]]='std', do_mask_input: Optional[bool]=None, mask_type: str='random', random_mask_ratio: float=0.5, num_forecast_mask_patches: Optional[Union[list[int], int]]=[2], channel_consistent_masking: Optional[bool]=False, unmasked_channel_indices: Optional[list[int]]=None, mask_value: int=0, pooling_type: str='mean', head_dropout: float=0.0, prediction_length: int=24, num_targets: int=1, output_range: Optional[list]=None, num_parallel_samples: int=100, **kwargs):
self.context_length = context_length
self.num_input_channels = num_input_channels
self.loss = loss
self.distribution_output = distribution_output
self.num_parallel_samples = num_parallel_samples
self.d_model = d_model
self.num_attention_heads = num_attention_heads
self.ffn_dim = ffn_dim
self.num_hidden_layers = num_hidden_layers
self.attention_dropout = attention_dropout
self.share_embedding = share_embedding
self.channel_attention = channel_attention
self.norm_type = norm_type
self.norm_eps = norm_eps
self.positional_dropout = positional_dropout
self.path_dropout = path_dropout
self.ff_dropout = ff_dropout
self.bias = bias
self.activation_function = activation_function
self.pre_norm = pre_norm
self.positional_encoding_type = positional_encoding_type
self.use_cls_token = use_cls_token
self.init_std = init_std
self.scaling = scaling
self.patch_length = patch_length
self.patch_stride = patch_stride
self.do_mask_input = do_mask_input
self.mask_type = mask_type
self.random_mask_ratio = random_mask_ratio
self.num_forecast_mask_patches = num_forecast_mask_patches
self.channel_consistent_masking = channel_consistent_masking
self.unmasked_channel_indices = unmasked_channel_indices
self.mask_value = mask_value
self.pooling_type = pooling_type
self.head_dropout = head_dropout
self.share_projection = share_projection
self.prediction_length = prediction_length
self.num_parallel_samples = num_parallel_samples
self.num_targets = num_targets
self.output_range = output_range
super().__init__(**kwargs)
class_skeleton:
class PatchTSTConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`PatchTSTModel`]. It is used to instantiate a
PatchTST model according to the specified arguments, defining the model architecture.
[ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_input_channels (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
context_length (`int`, *optional*, defaults to 32):
The context length of the input sequence.
distribution_output (`str`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
"negative_binomial".
loss (`str`, *optional*, defaults to `"mse"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
error "mse".
patch_length (`int`, *optional*, defaults to 1):
Define the patch length of the patchification process.
patch_stride (`int`, *optional*, defaults to 1):
Define the stride of the patchification process.
num_hidden_layers (`int`, *optional*, defaults to 3):
Number of hidden layers.
d_model (`int`, *optional*, defaults to 128):
Dimensionality of the transformer layers.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
share_embedding (`bool`, *optional*, defaults to `True`):
Sharing the input embedding across all channels.
channel_attention (`bool`, *optional*, defaults to `False`):
Activate channel attention block in the Transformer to allow channels to attend each other.
ffn_dim (`int`, *optional*, defaults to 512):
Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
norm_type (`str` , *optional*, defaults to `"batchnorm"`):
Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`.
norm_eps (`float`, *optional*, defaults to 1e-05):
A value added to the denominator for numerical stability of normalization.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for the attention probabilities.
positional_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability in the positional embedding layer.
path_dropout (`float`, *optional*, defaults to 0.0):
The dropout path in the residual block.
ff_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability used between the two layers of the feed-forward networks.
bias (`bool`, *optional*, defaults to `True`):
Whether to add bias in the feed-forward networks.
activation_function (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (string) in the Transformer. `"gelu"` and `"relu"` are supported.
pre_norm (`bool`, *optional*, defaults to `True`):
Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is
applied after the residual block.
positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
Positional encodings. Options `"random"` and `"sincos"` are supported.
use_cls_token (`bool`, *optional*, defaults to `False`):
Whether cls token is used.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
share_projection (`bool`, *optional*, defaults to `True`):
Sharing the projection layer across different channels in the forecast head.
scaling (`Union`, *optional*, defaults to `"std"`):
Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
scaler is set to "mean".
do_mask_input (`bool`, *optional*):
Apply masking during the pretraining.
mask_type (`str`, *optional*, defaults to `"random"`):
Masking type. Only `"random"` and `"forecast"` are currently supported.
random_mask_ratio (`float`, *optional*, defaults to 0.5):
Masking ratio applied to mask the input data during random pretraining.
num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`):
Number of patches to be masked at the end of each batch sample. If it is an integer,
all the samples in the batch will have the same number of masked patches. If it is a list,
samples in the batch will be randomly masked by numbers defined in the list. This argument is only used
for forecast pretraining.
channel_consistent_masking (`bool`, *optional*, defaults to `False`):
If channel consistent masking is True, all the channels will have the same masking pattern.
unmasked_channel_indices (`list`, *optional*):
Indices of channels that are not masked during pretraining. Values in the list are numbers between 1 and
`num_input_channels`.
mask_value (`int`, *optional*, defaults to 0):
Values in the masked patches will be filled by `mask_value`.
pooling_type (`str`, *optional*, defaults to `"mean"`):
Pooling of the embedding. `"mean"`, `"max"` and `None` are supported.
head_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for head.
prediction_length (`int`, *optional*, defaults to 24):
The prediction horizon that the model will output.
num_targets (`int`, *optional*, defaults to 1):
Number of targets for regression and classification tasks. For classification, it is the number of
classes.
output_range (`list`, *optional*):
Output range for regression task. The range of output values can be set to enforce the model to produce
values within a range.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples generated in parallel for probabilistic prediction.
```python
>>> from transformers import PatchTSTConfig, PatchTSTModel
>>> # Initializing an PatchTST configuration with 12 time steps for prediction
>>> configuration = PatchTSTConfig(prediction_length=12)
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = PatchTSTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_input_channels: int=1, context_length: int=32, distribution_output: str='student_t', loss: str='mse', patch_length: int=1, patch_stride: int=1, num_hidden_layers: int=3, d_model: int=128, num_attention_heads: int=4, share_embedding: bool=True, channel_attention: bool=False, ffn_dim: int=512, norm_type: str='batchnorm', norm_eps: float=1e-05, attention_dropout: float=0.0, positional_dropout: float=0.0, path_dropout: float=0.0, ff_dropout: float=0.0, bias: bool=True, activation_function: str='gelu', pre_norm: bool=True, positional_encoding_type: str='sincos', use_cls_token: bool=False, init_std: float=0.02, share_projection: bool=True, scaling: Optional[Union[str, bool]]='std', do_mask_input: Optional[bool]=None, mask_type: str='random', random_mask_ratio: float=0.5, num_forecast_mask_patches: Optional[Union[list[int], int]]=[2], channel_consistent_masking: Optional[bool]=False, unmasked_channel_indices: Optional[list[int]]=None, mask_value: int=0, pooling_type: str='mean', head_dropout: float=0.0, prediction_length: int=24, num_targets: int=1, output_range: Optional[list]=None, num_parallel_samples: int=100, **kwargs):
pass
| 2
| 1
| 106
| 8
| 84
| 17
| 1
| 1.35
| 1
| 5
| 0
| 0
| 1
| 39
| 1
| 33
| 228
| 17
| 91
| 85
| 47
| 123
| 45
| 43
| 43
| 1
| 2
| 0
| 1
|
4,418
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTAttention
|
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from typing import Callable, Optional, Union
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from .configuration_patchtst import PatchTSTConfig
from torch import nn
from ...processing_utils import Unpack
class PatchTSTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PatchTSTConfig]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
current_states = key_value_states if is_cross_attention else hidden_states
key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights, None)
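# Illustrative usage sketch (added for clarity; not part of the original source). It relies on the
# imports above and on the `PatchTSTConfig` defaults (d_model=128, num_attention_heads=4); in recent
# transformers releases `config._attn_implementation` falls back to "eager" when unset.
# >>> config = PatchTSTConfig()
# >>> attn = PatchTSTAttention(embed_dim=config.d_model, num_heads=config.num_attention_heads, config=config)
# >>> hidden = torch.randn(2, 16, config.d_model)  # Batch x Time x Channel, as documented in forward
# >>> attn_output, attn_weights, _ = attn(hidden, output_attentions=True)
# >>> attn_output.shape
# torch.Size([2, 16, 128])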
|
class PatchTSTAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PatchTSTConfig]=None):
pass
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 2
| 50
| 7
| 35
| 8
| 5
| 0.24
| 1
| 7
| 1
| 0
| 3
| 12
| 3
| 13
| 156
| 23
| 107
| 44
| 86
| 26
| 68
| 27
| 64
| 12
| 1
| 2
| 15
|
4,419
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTBatchNorm
|
import torch
from torch import nn
from .configuration_patchtst import PatchTSTConfig
class PatchTSTBatchNorm(nn.Module):
"""
Compute batch normalization over the sequence length (time) dimension.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps)
def forward(self, inputs: torch.Tensor):
"""
Parameters:
inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):
input for Batch norm calculation
Returns:
`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`
"""
output = inputs.transpose(1, 2)
output = self.batchnorm(output)
return output.transpose(1, 2)
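# Illustrative usage sketch (added for clarity; not part of the original source). Assumes the
# `PatchTSTConfig` defaults documented in this file (d_model=128, norm_eps=1e-05); the transpose lets
# `nn.BatchNorm1d` normalize each d_model channel across the batch and time dimensions.
# >>> norm = PatchTSTBatchNorm(PatchTSTConfig())
# >>> inputs = torch.randn(4, 32, 128)  # (batch_size, sequence_length, d_model)
# >>> norm(inputs).shape
# torch.Size([4, 32, 128])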
|
class PatchTSTBatchNorm(nn.Module):
'''
Compute batch normalization over the sequence length (time) dimension.
'''
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, inputs: torch.Tensor):
'''
Parameters:
inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):
input for Batch norm calculation
Returns:
`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`
'''
pass
| 3
| 2
| 7
| 0
| 4
| 4
| 1
| 1.38
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 20
| 2
| 8
| 5
| 5
| 11
| 8
| 5
| 5
| 1
| 1
| 0
| 2
|
4,420
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTClassificationHead
|
import torch
from torch import nn
from .configuration_patchtst import PatchTSTConfig
class PatchTSTClassificationHead(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.use_cls_token = config.use_cls_token
self.pooling_type = config.pooling_type
self.flatten = nn.Flatten(start_dim=1)
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
self.linear = nn.Linear(config.num_input_channels * config.d_model, config.num_targets)
def forward(self, embedding: torch.Tensor):
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, num_targets)`
"""
if self.use_cls_token:
pooled_embedding = embedding[:, :, 0, :]
elif self.pooling_type == 'mean':
pooled_embedding = embedding.mean(dim=2)
elif self.pooling_type == 'max':
pooled_embedding = embedding.max(dim=2).values
else:
raise ValueError(f'pooling operator {self.pooling_type} is not implemented yet')
pooled_embedding = self.flatten(pooled_embedding)
output = self.linear(self.dropout(pooled_embedding))
return output
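# Illustrative usage sketch (added for clarity; not part of the original source). The channel and
# target counts below are arbitrary example values; with the default "mean" pooling, patches are
# averaged, channels are flattened, and a single linear layer produces the class logits.
# >>> config = PatchTSTConfig(num_input_channels=2, num_targets=3)
# >>> head = PatchTSTClassificationHead(config)
# >>> embedding = torch.randn(8, 2, 10, config.d_model)  # (bs, num_channels, num_patches, d_model)
# >>> head(embedding).shape
# torch.Size([8, 3])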
|
class PatchTSTClassificationHead(nn.Module):
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, embedding: torch.Tensor):
'''
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, num_targets)`
'''
pass
| 3
| 1
| 17
| 1
| 10
| 7
| 3
| 0.65
| 1
| 4
| 1
| 0
| 2
| 5
| 2
| 12
| 35
| 2
| 20
| 10
| 17
| 13
| 17
| 10
| 14
| 4
| 1
| 1
| 6
|
4,421
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTEmbedding
|
from torch import nn
import torch
from .configuration_patchtst import PatchTSTConfig
class PatchTSTEmbedding(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.num_input_channels = config.num_input_channels
self.share_embedding = config.share_embedding
if self.share_embedding:
self.input_embedding = nn.Linear(config.patch_length, config.d_model)
else:
self.input_embedding = nn.ModuleList()
for _ in range(config.num_input_channels):
self.input_embedding.append(nn.Linear(config.patch_length, config.d_model))
def forward(self, patch_input: torch.Tensor):
"""
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input for embedding
return:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)`
"""
num_input_channels = patch_input.shape[1]
if num_input_channels != self.num_input_channels:
raise ValueError(f'The defined number of input channels ({self.num_input_channels}) in the config has to be the same as the number of channels in the batch input ({num_input_channels})')
if self.share_embedding:
embeddings = self.input_embedding(patch_input)
else:
embeddings = [self.input_embedding[i](patch_input[:, i, :, :]) for i in range(num_input_channels)]
embeddings = torch.stack(embeddings, dim=1)
return embeddings
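# Illustrative usage sketch (added for clarity; not part of the original source). The sizes are
# arbitrary example values; with `share_embedding=True` (the default) a single
# nn.Linear(patch_length, d_model) projects every channel's patches.
# >>> config = PatchTSTConfig(num_input_channels=2, patch_length=12)
# >>> embed = PatchTSTEmbedding(config)
# >>> patches = torch.randn(8, 2, 10, 12)  # (batch_size, num_channels, num_patches, patch_length)
# >>> embed(patches).shape
# torch.Size([8, 2, 10, 128])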
|
class PatchTSTEmbedding(nn.Module):
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, patch_input: torch.Tensor):
'''
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input for embedding
return:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)`
'''
pass
| 3
| 1
| 16
| 0
| 12
| 5
| 3
| 0.42
| 1
| 5
| 1
| 0
| 2
| 3
| 2
| 12
| 34
| 1
| 24
| 9
| 21
| 10
| 19
| 9
| 16
| 3
| 1
| 2
| 6
|
4,422
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTEncoder
|
from torch import nn
from .configuration_patchtst import PatchTSTConfig
import torch
from ...modeling_outputs import BaseModelOutput
from typing import Callable, Optional, Union
class PatchTSTEncoder(PatchTSTPreTrainedModel):
"""
PatchTST Encoder
"""
def __init__(self, config: PatchTSTConfig, num_patches: int):
super().__init__(config)
self.gradient_checkpointing = False
self.embedder = PatchTSTEmbedding(config)
self.positional_encoder = PatchTSTPositionalEncoding(config, num_patches)
self.layers = nn.ModuleList([PatchTSTEncoderLayer(config) for i in range(config.num_hidden_layers)])
self.post_init()
def forward(self, patch_input: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None) -> BaseModelOutput:
"""
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Past values of the time series
output_hidden_states (bool, optional): Indicates if hidden states should be outputted.
output_attentions (bool, optional): Indicates if attentions should be outputted.
return:
`BaseModelOutput`
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
patch_input = self.embedder(patch_input)
hidden_state = self.positional_encoder(patch_input)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_state,)
layer_outputs = encoder_layer(hidden_state=hidden_state, output_attentions=output_attentions)
hidden_state = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=encoder_states, attentions=all_attentions)
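# Illustrative usage sketch (added for clarity; not part of the original source). The encoder can be
# built directly from a config plus the number of patches; the sizes below are arbitrary example
# values and must match the patchified input.
# >>> config = PatchTSTConfig(num_input_channels=2, patch_length=12, num_hidden_layers=2)
# >>> encoder = PatchTSTEncoder(config, num_patches=10)
# >>> patches = torch.randn(8, 2, 10, 12)  # (batch_size, num_channels, num_patches, patch_length)
# >>> encoder(patches).last_hidden_state.shape
# torch.Size([8, 2, 10, 128])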
|
class PatchTSTEncoder(PatchTSTPreTrainedModel):
'''
PatchTST Encoder
'''
def __init__(self, config: PatchTSTConfig, num_patches: int):
pass
def forward(self, patch_input: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None) -> BaseModelOutput:
'''
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Past values of the time series
output_hidden_states (bool, optional): Indicates if hidden states should be outputted.
output_attentions (bool, optional): Indicates if attentions should be outputted.
return:
`BaseModelOutput`
'''
pass
| 3
| 2
| 27
| 3
| 15
| 10
| 5
| 0.73
| 1
| 10
| 5
| 0
| 2
| 4
| 2
| 4
| 60
| 8
| 30
| 17
| 22
| 22
| 23
| 12
| 20
| 8
| 2
| 2
| 9
|
4,423
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTEncoderLayer
|
from torch import nn
from .configuration_patchtst import PatchTSTConfig
import torch
from typing import Callable, Optional, Union
from ...activations import ACT2CLS
class PatchTSTEncoderLayer(nn.Module):
"""
PatchTST encoder layer
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.channel_attention = config.channel_attention
self.self_attn = PatchTSTAttention(embed_dim=config.d_model, num_heads=config.num_attention_heads, dropout=config.attention_dropout, config=config)
self.dropout_path1 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
if config.norm_type == 'batchnorm':
self.norm_sublayer1 = PatchTSTBatchNorm(config)
elif config.norm_type == 'layernorm':
self.norm_sublayer1 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
else:
raise ValueError(f'{config.norm_type} is not a supported norm layer type.')
if self.channel_attention:
self.dropout_path2 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
if config.norm_type == 'batchnorm':
self.norm_sublayer2 = PatchTSTBatchNorm(config)
elif config.norm_type == 'layernorm':
self.norm_sublayer2 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
else:
raise ValueError(f'{config.norm_type} is not a supported norm layer type.')
self.ff = nn.Sequential(nn.Linear(config.d_model, config.ffn_dim, bias=config.bias), ACT2CLS[config.activation_function](), nn.Dropout(config.ff_dropout) if config.ff_dropout > 0 else nn.Identity(), nn.Linear(config.ffn_dim, config.d_model, bias=config.bias))
self.dropout_path3 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity()
if config.norm_type == 'batchnorm':
self.norm_sublayer3 = PatchTSTBatchNorm(config)
elif config.norm_type == 'layernorm':
self.norm_sublayer3 = nn.LayerNorm(config.d_model, eps=config.norm_eps)
else:
raise ValueError(f'{config.norm_type} is not a supported norm layer type.')
self.pre_norm = config.pre_norm
def forward(self, hidden_state: torch.Tensor, output_attentions: Optional[bool]=None):
"""
Parameters:
hidden_state (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`, *required*):
Past values of the time series
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
Return:
`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`
"""
batch_size, num_input_channels, sequence_length, d_model = hidden_state.shape
hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model)
if self.pre_norm:
attn_output, attn_weights, _ = self.self_attn(hidden_states=self.norm_sublayer1(hidden_state), output_attentions=output_attentions)
hidden_state = hidden_state + self.dropout_path1(attn_output)
else:
attn_output, attn_weights, _ = self.self_attn(hidden_states=hidden_state, output_attentions=output_attentions)
hidden_state = self.norm_sublayer1(hidden_state + self.dropout_path1(attn_output))
hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model)
if self.channel_attention:
hidden_state = hidden_state.transpose(2, 1).contiguous()
hidden_state = hidden_state.view(batch_size * sequence_length, num_input_channels, d_model)
if self.pre_norm:
attn_output, channel_attn_weights, _ = self.self_attn(hidden_states=self.norm_sublayer2(hidden_state), output_attentions=output_attentions)
hidden_state = hidden_state + self.dropout_path2(attn_output)
else:
attn_output, channel_attn_weights, _ = self.self_attn(hidden_states=hidden_state, output_attentions=output_attentions)
hidden_state = self.norm_sublayer2(hidden_state + self.dropout_path2(attn_output))
hidden_state = hidden_state.reshape(batch_size, sequence_length, num_input_channels, d_model)
hidden_state = hidden_state.transpose(1, 2).contiguous()
hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model)
if self.pre_norm:
hidden_state = hidden_state + self.dropout_path3(self.ff(self.norm_sublayer3(hidden_state)))
else:
hidden_state = self.norm_sublayer3(hidden_state + self.dropout_path3(self.ff(hidden_state)))
hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model)
outputs = (hidden_state,)
if output_attentions:
outputs += (attn_weights, channel_attn_weights) if self.channel_attention else (attn_weights,)
return outputs
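# Illustrative usage sketch (added for clarity; not part of the original source). With the default
# config (batchnorm, pre_norm=True, channel_attention=False) the layer applies time attention and a
# feed-forward block per channel and returns a tuple whose first element is the new hidden state.
# >>> layer = PatchTSTEncoderLayer(PatchTSTConfig())
# >>> hidden = torch.randn(8, 2, 10, 128)  # (batch_size, num_channels, sequence_length, d_model)
# >>> layer(hidden)[0].shape
# torch.Size([8, 2, 10, 128])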
|
class PatchTSTEncoderLayer(nn.Module):
'''
PatchTST encoder layer
'''
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, hidden_state: torch.Tensor, output_attentions: Optional[bool]=None):
'''
Parameters:
hidden_state (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`, *required*):
Past values of the time series
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
Return:
`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`
'''
pass
| 3
| 2
| 65
| 8
| 38
| 19
| 10
| 0.53
| 1
| 7
| 3
| 0
| 2
| 10
| 2
| 12
| 136
| 18
| 77
| 17
| 74
| 41
| 51
| 17
| 48
| 12
| 1
| 2
| 19
|
4,424
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTForClassification
|
from torch import nn
from .configuration_patchtst import PatchTSTConfig
import torch
from ...utils import ModelOutput, auto_docstring, logging
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n The PatchTST for classification model.\n ')
class PatchTSTForClassification(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
if config.do_mask_input:
logger.warning('Setting `do_mask_input` parameter to False.')
config.do_mask_input = False
self.model = PatchTSTModel(config)
self.head = PatchTSTClassificationHead(config)
self.post_init()
@auto_docstring
def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, past_observed_mask: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForClassificationOutput]:
"""
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
target_values (`torch.Tensor`, *optional*):
Labels associated with the `past_values`
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Examples:
```python
>>> from transformers import PatchTSTConfig, PatchTSTForClassification
>>> # classification task with two input channels and 3 classes
>>> config = PatchTSTConfig(
... num_input_channels=2,
... num_targets=3,
... context_length=512,
... patch_length=12,
... stride=12,
... use_cls_token=True,
... )
>>> model = PatchTSTForClassification(config=config)
>>> # during inference, one only provides past values
>>> past_values = torch.randn(20, 512, 2)
>>> outputs = model(past_values=past_values)
>>> labels = outputs.prediction_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_output = self.model(past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True)
y_hat = self.head(model_output.last_hidden_state)
loss_val = None
if target_values is not None:
loss = nn.CrossEntropyLoss()
loss_val = loss(y_hat, target_values)
if not return_dict:
outputs = (y_hat,) + model_output[1:-3]
outputs = (loss_val,) + outputs if loss_val is not None else outputs
return outputs
return PatchTSTForClassificationOutput(loss=loss_val, prediction_logits=y_hat, hidden_states=model_output.hidden_states, attentions=model_output.attentions)
|
@auto_docstring(custom_intro='\n The PatchTST for classification model.\n ')
class PatchTSTForClassification(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, past_observed_mask: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForClassificationOutput]:
'''
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
target_values (`torch.Tensor`, *optional*):
Labels associated with the `past_values`
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Examples:
```python
>>> from transformers import PatchTSTConfig, PatchTSTForClassification
>>> # classification task with two input channels and 3 classes
>>> config = PatchTSTConfig(
... num_input_channels=2,
... num_targets=3,
... context_length=512,
... patch_length=12,
... stride=12,
... use_cls_token=True,
... )
>>> model = PatchTSTForClassification(config=config)
>>> # during inference, one only provides past values
>>> past_values = torch.randn(20, 512, 2)
>>> outputs = model(past_values=past_values)
>>> labels = outputs.prediction_logits
```'''
pass
| 5
| 1
| 47
| 7
| 20
| 20
| 4
| 0.98
| 1
| 8
| 4
| 0
| 2
| 2
| 2
| 4
| 95
| 14
| 41
| 18
| 30
| 40
| 22
| 10
| 19
| 5
| 2
| 1
| 7
|
4,425
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTForClassificationOutput
|
from ...utils import ModelOutput, auto_docstring, logging
import torch
from dataclasses import dataclass
from typing import Callable, Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSTForClassification`].\n ')
class PatchTSTForClassificationOutput(ModelOutput):
"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Prediction scores of the PatchTST modeling head (scores before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSTForClassification`].\n ')
class PatchTSTForClassificationOutput(ModelOutput):
'''
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Prediction scores of the PatchTST modeling head (scores before SoftMax).
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 4
| 5
| 5
| 4
| 18
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
4,426
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTForPrediction
|
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
from torch import nn
from .configuration_patchtst import PatchTSTConfig
import torch
from ...utils import ModelOutput, auto_docstring, logging
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n The PatchTST for prediction model.\n ')
class PatchTSTForPrediction(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
if config.do_mask_input:
logger.warning('Setting `do_mask_input` parameter to False.')
config.do_mask_input = False
self.model = PatchTSTModel(config)
if config.loss == 'mse':
self.distribution_output = None
elif config.distribution_output == 'student_t':
self.distribution_output = StudentTOutput(dim=config.prediction_length)
elif config.distribution_output == 'normal':
self.distribution_output = NormalOutput(dim=config.prediction_length)
elif config.distribution_output == 'negative_binomial':
self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length)
else:
raise ValueError(f'Unknown distribution output {config.distribution_output}')
self.head = PatchTSTPredictionHead(config, self.model.patchifier.num_patches, distribution_output=self.distribution_output)
self.post_init()
def forward(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForPredictionOutput]:
"""
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
future_values (`torch.Tensor` of shape `(bs, forecast_len, num_input_channels)`, *optional*):
Future target values associated with the `past_values`
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTForPredictionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or
`config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # Prediction task with 7 input channels and a prediction length of 96
>>> model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast")
>>> # during training, one provides both past and future values
>>> outputs = model(
... past_values=batch["past_values"],
... future_values=batch["future_values"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values, the model outputs future values
>>> outputs = model(past_values=batch["past_values"])
>>> prediction_outputs = outputs.prediction_outputs
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_output = self.model(past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True)
y_hat = self.head(model_output.last_hidden_state)
loss_val = None
if self.distribution_output:
y_hat_out = y_hat
else:
y_hat_out = y_hat * model_output.scale + model_output.loc
if future_values is not None:
if self.distribution_output:
distribution = self.distribution_output.distribution(y_hat, loc=model_output.loc, scale=model_output.scale)
loss_val = nll(distribution, future_values)
loss_val = weighted_average(loss_val)
else:
loss = nn.MSELoss(reduction='mean')
loss_val = loss(y_hat_out, future_values)
loc = model_output.loc
scale = model_output.scale
if not return_dict:
outputs = (y_hat_out,) + model_output[1:-1]
outputs = (loss_val,) + outputs if loss_val is not None else outputs
return outputs
return PatchTSTForPredictionOutput(loss=loss_val, prediction_outputs=y_hat_out, hidden_states=model_output.hidden_states, attentions=model_output.attentions, loc=loc, scale=scale)
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None) -> SamplePatchTSTOutput:
"""
Generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Past values of the time series that serve as context in order to predict the future.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Return:
[`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length, 1)` or `(batch_size, number of samples, prediction_length, num_input_channels)`
for multivariate predictions.
"""
num_parallel_samples = self.config.num_parallel_samples
outputs = self(past_values=past_values, future_values=None, past_observed_mask=past_observed_mask, output_hidden_states=False)
if self.distribution_output:
distribution = self.distribution_output.distribution(outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale)
samples = [distribution.sample() for _ in range(num_parallel_samples)]
samples = torch.stack(samples, dim=1)
else:
samples = outputs.prediction_outputs.unsqueeze(1)
return SamplePatchTSTOutput(sequences=samples)
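# Illustrative generation sketch (added for clarity; not part of the original source). The config
# values are arbitrary example settings; `loss="nll"` selects the default Student-T distribution head
# so that `generate` can draw `num_parallel_samples` trajectories per series.
# >>> config = PatchTSTConfig(
# ...     num_input_channels=7, context_length=512, patch_length=12, patch_stride=12,
# ...     prediction_length=96, loss="nll", num_parallel_samples=50,
# ... )
# >>> model = PatchTSTForPrediction(config)
# >>> samples = model.generate(past_values=torch.randn(2, 512, 7))
# >>> samples.sequences.shape  # (batch_size, num_parallel_samples, prediction_length, num_input_channels)
# torch.Size([2, 50, 96, 7])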
|
@auto_docstring(custom_intro='\n The PatchTST for prediction model.\n ')
class PatchTSTForPrediction(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForPredictionOutput]:
'''
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
future_values (`torch.Tensor` of shape `(bs, forecast_len, num_input_channels)`, *optional*):
Future target values associated with the `past_values`
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTForPredictionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or
`config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # Prediction task with 7 input channels and a prediction length of 96
>>> model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast")
>>> # during training, one provides both past and future values
>>> outputs = model(
... past_values=batch["past_values"],
... future_values=batch["future_values"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values, the model outputs future values
>>> outputs = model(past_values=batch["past_values"])
>>> prediction_outputs = outputs.prediction_outputs
```'''
pass
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None) -> SamplePatchTSTOutput:
'''
Generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Past values of the time series that serve as context in order to predict the future.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Return:
[`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length, 1)` or `(batch_size, number of samples, prediction_length, num_input_channels)`
for multivariate predictions.
'''
pass
| 6
| 2
| 61
| 9
| 30
| 22
| 5
| 0.74
| 1
| 13
| 8
| 0
| 3
| 3
| 3
| 5
| 185
| 28
| 90
| 32
| 74
| 67
| 47
| 20
| 43
| 7
| 2
| 2
| 15
|
4,427
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTForPredictionOutput
|
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
import torch
from typing import Callable, Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSTForPrediction`].\n ')
class PatchTSTForPredictionOutput(ModelOutput):
"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, -1)`):
Prediction outputs of the time series modeling heads.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length.
scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length.
"""
loss: Optional[torch.FloatTensor] = None
prediction_outputs: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
loc: Optional[torch.FloatTensor] = None
scale: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSTForPrediction`].\n ')
class PatchTSTForPredictionOutput(ModelOutput):
'''
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, -1)`):
Prediction outputs of the time series modeling heads.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length.
scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 4
| 7
| 7
| 6
| 21
| 7
| 7
| 6
| 0
| 1
| 0
| 0
|
4,428
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTForPretraining
|
from torch import nn
from .configuration_patchtst import PatchTSTConfig
import torch
from ...utils import ModelOutput, auto_docstring, logging
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n The PatchTST for pretrain model.\n ')
class PatchTSTForPretraining(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
config.do_mask_input = True
self.model = PatchTSTModel(config=config)
self.head = PatchTSTMaskPretrainHead(config)
self.post_init()
def forward(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForPretrainingOutput]:
"""
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTForPretrainingOutput` or tuple of `torch.Tensor` (if `return_dict`=False or
`config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForPretraining
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # Config for random mask pretraining
>>> config = PatchTSTConfig(
... num_input_channels=7,
... context_length=512,
... patch_length=12,
... stride=12,
... mask_type='random',
... random_mask_ratio=0.4,
... use_cls_token=True,
... )
>>> # Config for forecast mask pretraining
>>> config = PatchTSTConfig(
... num_input_channels=7,
... context_length=512,
... patch_length=12,
... stride=12,
... mask_type='forecast',
... num_forecast_mask_patches=5,
... use_cls_token=True,
... )
>>> model = PatchTSTForPretraining(config)
>>> # during training, one provides both past and future values
>>> outputs = model(past_values=batch["past_values"])
>>> loss = outputs.loss
>>> loss.backward()
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_output = self.model(past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True)
x_hat = self.head(model_output.last_hidden_state)
loss = nn.MSELoss(reduction='none')
loss_val = loss(x_hat, model_output.patch_input)
masked_loss = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10)
encoder_states = model_output.hidden_states
if not return_dict:
outputs = (x_hat,) + model_output[1:-4]
outputs = (masked_loss,) + outputs if masked_loss is not None else outputs
return outputs
return PatchTSTForPretrainingOutput(loss=masked_loss, prediction_output=x_hat, hidden_states=encoder_states, attentions=model_output.attentions)
|
@auto_docstring(custom_intro='\n The PatchTST for pretrain model.\n ')
class PatchTSTForPretraining(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForPretrainingOutput]:
'''
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTForPretrainingOutput` or tuple of `torch.Tensor` (if `return_dict`=False or
`config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTConfig, PatchTSTForPretraining
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> # Config for random mask pretraining
>>> config = PatchTSTConfig(
... num_input_channels=7,
... context_length=512,
... patch_length=12,
... stride=12,
... mask_type='random',
... random_mask_ratio=0.4,
... use_cls_token=True,
... )
>>> # Config for forecast mask pretraining
>>> config = PatchTSTConfig(
... num_input_channels=7,
... context_length=512,
... patch_length=12,
... stride=12,
... mask_type='forecast',
... num_forecast_mask_patches=5,
... use_cls_token=True,
... )
>>> model = PatchTSTForPretraining(config)
>>> # during training, one provides both past and future values
>>> outputs = model(past_values=batch["past_values"])
>>> loss = outputs.loss
>>> loss.backward()
```'''
pass
| 4
| 1
| 54
| 8
| 17
| 29
| 3
| 1.66
| 1
| 7
| 4
| 0
| 2
| 2
| 2
| 4
| 109
| 16
| 35
| 19
| 25
| 58
| 20
| 12
| 17
| 4
| 2
| 1
| 5
|
4,429
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTForPretrainingOutput
|
from ...utils import ModelOutput, auto_docstring, logging
import torch
from dataclasses import dataclass
from typing import Callable, Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSTForPretraining`].\n ')
class PatchTSTForPretrainingOutput(ModelOutput):
"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
prediction_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction outputs of the time series modeling heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSTForPretraining`].\n ')
class PatchTSTForPretrainingOutput(ModelOutput):
'''
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
prediction_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction outputs of the time series modeling heads.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 4
| 5
| 5
| 4
| 17
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
4,430
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTForRegression
|
from torch import nn
from .configuration_patchtst import PatchTSTConfig
import torch
from ...utils import ModelOutput, auto_docstring, logging
from typing import Callable, Optional, Union
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
@auto_docstring(custom_intro='\n The PatchTST for regression model.\n ')
class PatchTSTForRegression(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
if config.do_mask_input:
logger.warning('Setting `do_mask_input` parameter to False.')
config.do_mask_input = False
self.model = PatchTSTModel(config)
if config.loss == 'mse':
self.distribution_output = None
elif config.distribution_output == 'student_t':
self.distribution_output = StudentTOutput(dim=config.num_targets)
elif config.distribution_output == 'normal':
self.distribution_output = NormalOutput(dim=config.num_targets)
elif config.distribution_output == 'negative_binomial':
self.distribution_output = NegativeBinomialOutput(dim=config.num_targets)
else:
raise ValueError(f'Unknown distribution output {config.distribution_output}')
self.head = PatchTSTRegressionHead(config, self.distribution_output)
self.post_init()
@auto_docstring
def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForRegressionOutput]:
"""
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
target_values (`torch.Tensor` of shape `(bs, num_input_channels)`):
Target values associated with the `past_values`
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Examples:
```python
>>> from transformers import PatchTSTConfig, PatchTSTForRegression
>>> # Regression task with 6 input channels and regress 2 targets
>>> model = PatchTSTForRegression.from_pretrained("namctin/patchtst_etth1_regression")
>>> # during inference, one only provides past values, the model outputs future values
>>> past_values = torch.randn(20, 512, 6)
>>> outputs = model(past_values=past_values)
>>> regression_outputs = outputs.regression_outputs
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_output = self.model(past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True)
y_hat = self.head(model_output.last_hidden_state)
loss = None
if target_values is not None:
if self.distribution_output:
distribution = self.distribution_output.distribution(y_hat)
y_hat = tuple((item.view(-1, self.config.num_targets) for item in y_hat))
loss = nll(distribution, target_values)
loss = weighted_average(loss)
else:
loss = nn.MSELoss(reduction='mean')
loss = loss(y_hat, target_values)
if not return_dict:
outputs = (y_hat,) + model_output[1:-3]
outputs = (loss,) + outputs if loss is not None else outputs
return outputs
return PatchTSTForRegressionOutput(loss=loss, regression_outputs=y_hat, hidden_states=model_output.hidden_states, attentions=model_output.attentions)
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None) -> SamplePatchTSTOutput:
"""
Generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Past values of the time series that serve as context in order to predict the future.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Return:
[`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, num_targets)`.
"""
num_parallel_samples = self.config.num_parallel_samples
outputs = self(past_values=past_values, target_values=None, past_observed_mask=past_observed_mask, output_hidden_states=False)
distribution = self.distribution_output.distribution(outputs.regression_outputs)
samples = [distribution.sample() for _ in range(num_parallel_samples)]
samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets)
return SamplePatchTSTOutput(sequences=samples)
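# Illustrative generation sketch (added for clarity; not part of the original source). The settings
# are arbitrary example values; `generate` requires a distribution head, so `loss="nll"` is used here
# instead of the default "mse".
# >>> config = PatchTSTConfig(num_input_channels=6, num_targets=2, loss="nll", num_parallel_samples=50)
# >>> model = PatchTSTForRegression(config)
# >>> past_values = torch.randn(4, config.context_length, 6)
# >>> model.generate(past_values=past_values).sequences.shape  # (batch_size, num_parallel_samples, num_targets)
# torch.Size([4, 50, 2])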
|
@auto_docstring(custom_intro='\n The PatchTST for regression model.\n ')
class PatchTSTForRegression(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTForRegressionOutput]:
'''
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
target_values (`torch.Tensor` of shape `(bs, num_input_channels)`):
Target values associated with the `past_values`
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Examples:
```python
>>> from transformers import PatchTSTConfig, PatchTSTForRegression
>>> # Regression task with 6 input channels and regress 2 targets
>>> model = PatchTSTForRegression.from_pretrained("namctin/patchtst_etth1_regression")
>>> # during inference, one only provides past values, the model outputs future values
>>> past_values = torch.randn(20, 512, 6)
>>> outputs = model(past_values=past_values)
>>> regression_outputs = outputs.regression_outputs
```'''
pass
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None) -> SamplePatchTSTOutput:
'''
Generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Past values of the time series that serve as context in order to predict the future.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Return:
[`SamplePatchTSTOutput`] where the output `sequences` tensor will have shape `(batch_size, number of
samples, num_targets)`.
'''
pass
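A minimal usage sketch for the probabilistic `generate` path above. Everything here is illustrative: the config values are arbitrary, the weights are untrained, and `generate` only applies when the model is built with a distributional head, which PatchTST enables via `loss="nll"` (with the default `student_t` output).

```python
import torch
from transformers import PatchTSTConfig, PatchTSTForRegression

# loss="nll" gives the model a distributional head, which generate() requires
config = PatchTSTConfig(
    num_input_channels=6, context_length=512, patch_length=16, patch_stride=16,
    num_targets=2, loss="nll",
)
model = PatchTSTForRegression(config).eval()

past_values = torch.randn(4, 512, 6)  # (batch, context_length, num_input_channels)
samples = model.generate(past_values=past_values)
print(samples.sequences.shape)  # (4, config.num_parallel_samples, 2)
```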
| 7
| 2
| 49
| 6
| 24
| 18
| 4
| 0.74
| 1
| 14
| 8
| 0
| 3
| 3
| 3
| 5
| 150
| 21
| 74
| 28
| 58
| 55
| 41
| 16
| 37
| 6
| 2
| 2
| 13
|
4,431
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTForRegressionOutput
|
from ...utils import ModelOutput, auto_docstring, logging
import torch
from dataclasses import dataclass
from typing import Callable, Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSTForRegression`].\n ')
class PatchTSTForRegressionOutput(ModelOutput):
"""
loss (*optional*, returned when `target_values` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Regression outputs of the time series modeling heads.
"""
loss: Optional[torch.FloatTensor] = None
regression_outputs: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`PatchTSTForRegression`].\n ')
class PatchTSTForRegressionOutput(ModelOutput):
'''
loss (*optional*, returned when `target_values` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Regression outputs of the time series modeling heads.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 4
| 5
| 5
| 4
| 17
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
4,432
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTMaskPretrainHead
|
from torch import nn
from .configuration_patchtst import PatchTSTConfig
import torch
class PatchTSTMaskPretrainHead(nn.Module):
"""
Pretraining head for mask modelling
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
self.linear = nn.Linear(config.d_model, config.patch_length)
self.use_cls_token = config.use_cls_token
def forward(self, embedding: torch.Tensor) -> torch.Tensor:
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, num_channels, num_patches, patch_length)`; when `cls_token` is set to True,
the class-token position is dropped before returning
"""
embedding = self.linear(self.dropout(embedding))
if self.use_cls_token:
embedding = embedding[:, :, 1:, :]
return embedding
|
class PatchTSTMaskPretrainHead(nn.Module):
'''
Pretraining head for mask modelling
'''
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, embedding: torch.Tensor) -> torch.Tensor:
'''
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, num_channels, num_patches, patch_length)`; when `cls_token` is set to True,
the class-token position is dropped before returning
'''
pass
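A shape-only sketch of what the head above does, using plain `torch` with illustrative sizes (d_model=128, patch_length=16); it stands in for the library class rather than instantiating it.

```python
import torch
from torch import nn

bs, num_channels, num_patches, d_model, patch_length = 2, 6, 63, 128, 16
linear = nn.Linear(d_model, patch_length)  # stands in for the head's projection

embedding = torch.randn(bs, num_channels, num_patches, d_model)
reconstruction = linear(embedding)  # (2, 6, 63, 16): one reconstructed patch per position
print(reconstruction.shape)
```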
| 3
| 2
| 10
| 1
| 5
| 6
| 2
| 1.27
| 1
| 3
| 1
| 0
| 2
| 3
| 2
| 12
| 26
| 3
| 11
| 6
| 8
| 14
| 11
| 6
| 8
| 2
| 1
| 1
| 4
|
4,433
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTMasking
|
from torch import nn
import torch
from .configuration_patchtst import PatchTSTConfig
class PatchTSTMasking(nn.Module):
"""
Class to perform random or forecast masking.
Parameters:
config (`PatchTSTConfig`): model config
Returns:
x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.random_mask_ratio = config.random_mask_ratio
self.channel_consistent_masking = config.channel_consistent_masking
self.mask_type = config.mask_type
self.num_forecast_mask_patches = config.num_forecast_mask_patches
self.unmasked_channel_indices = config.unmasked_channel_indices
self.mask_value = config.mask_value
if self.unmasked_channel_indices is not None:
self.unmasked_channel_indices = sorted(self.unmasked_channel_indices)
def forward(self, patch_input: torch.Tensor):
"""
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input
Return:
masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
"""
if self.mask_type == 'random':
masked_input, mask = random_masking(inputs=patch_input, mask_ratio=self.random_mask_ratio, unmasked_channel_indices=self.unmasked_channel_indices, channel_consistent_masking=self.channel_consistent_masking, mask_value=self.mask_value)
elif self.mask_type == 'forecast':
masked_input, mask = forecast_masking(inputs=patch_input, num_forecast_mask_patches=self.num_forecast_mask_patches, unmasked_channel_indices=self.unmasked_channel_indices, mask_value=self.mask_value)
else:
raise ValueError(f'Invalid mask type {self.mask_type}.')
mask = mask.bool()
return (masked_input, mask)
|
class PatchTSTMasking(nn.Module):
'''
Class to perform random or forecast masking.
Parameters:
config (`PatchTSTConfig`): model config
Returns:
x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
'''
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, patch_input: torch.Tensor):
'''
Parameters:
patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*):
Patch input
Return:
masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`)
Masked patched input
mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`)
Bool tensor indicating True on masked points
'''
pass
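The `random_masking`/`forecast_masking` helpers called above are defined elsewhere in the file; the following is only a conceptual sketch of random patch masking (not the library helper), showing how a fraction of patches per channel can be selected and overwritten with `mask_value`.

```python
import torch

bs, num_channels, num_patches, patch_length = 2, 6, 63, 16
mask_ratio, mask_value = 0.4, 0.0

patch_input = torch.randn(bs, num_channels, num_patches, patch_length)

# pick int(mask_ratio * num_patches) patches per (sample, channel) at random
noise = torch.rand(bs, num_channels, num_patches)
num_masked = int(mask_ratio * num_patches)
idx = noise.topk(num_masked, dim=-1).indices
mask = torch.zeros(bs, num_channels, num_patches).scatter(-1, idx, 1.0).bool()

masked_input = patch_input.masked_fill(mask.unsqueeze(-1), mask_value)
print(mask.float().mean())  # ≈ 25 / 63 masked patches
```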
| 3
| 2
| 22
| 2
| 15
| 6
| 3
| 0.68
| 1
| 4
| 1
| 0
| 2
| 6
| 2
| 12
| 58
| 6
| 31
| 10
| 28
| 21
| 18
| 10
| 15
| 3
| 1
| 1
| 5
|
4,434
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTMeanScaler
|
import torch
from torch import nn
from .configuration_patchtst import PatchTSTConfig
class PatchTSTMeanScaler(nn.Module):
"""
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-10
self.default_scale = config.default_scale if hasattr(config, 'default_scale') else None
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
num_observed = observed_indicator.sum(self.dim, keepdim=True)
scale = ts_sum / torch.clamp(num_observed, min=1)
if self.default_scale is None:
batch_sum = ts_sum.sum(dim=0)
batch_observations = torch.clamp(num_observed.sum(0), min=1)
default_scale = torch.squeeze(batch_sum / batch_observations)
else:
default_scale = self.default_scale * torch.ones_like(scale)
scale = torch.where(num_observed > 0, scale, default_scale)
scale = torch.clamp(scale, min=self.minimum_scale)
scaled_data = data / scale
if not self.keepdim:
scale = scale.squeeze(dim=self.dim)
return (scaled_data, torch.zeros_like(scale), scale)
|
class PatchTSTMeanScaler(nn.Module):
'''
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
'''
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
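A worked numeric sketch of the main path of the scaler above (dim=1, keepdim=True), with one time step marked as missing; the `default_scale` branch only matters when a series has no observed values at all.

```python
import torch

data = torch.tensor([[[2.0], [-4.0], [6.0], [100.0]]])    # (batch=1, seq=4, channels=1)
observed = torch.tensor([[[1.0], [1.0], [1.0], [0.0]]])    # the 100.0 is marked missing

ts_sum = (data * observed).abs().sum(1, keepdim=True)      # |2| + |-4| + |6| = 12
num_observed = observed.sum(1, keepdim=True)               # 3
scale = ts_sum / torch.clamp(num_observed, min=1)          # 4.0
scaled_data = data / scale
print(scale.squeeze(), scaled_data.squeeze())              # tensor(4.) tensor([0.5, -1.0, 1.5, 25.0])
```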
| 3
| 2
| 23
| 3
| 12
| 8
| 4
| 0.76
| 1
| 3
| 1
| 0
| 2
| 4
| 2
| 12
| 52
| 8
| 25
| 16
| 20
| 19
| 22
| 14
| 19
| 5
| 1
| 1
| 8
|
4,435
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTModel
|
from torch import nn
from .configuration_patchtst import PatchTSTConfig
import torch
from ...utils import ModelOutput, auto_docstring, logging
from typing import Callable, Optional, Union
@auto_docstring
class PatchTSTModel(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
self.scaler = PatchTSTScaler(config)
self.patchifier = PatchTSTPatchify(config)
self.do_mask_input = config.do_mask_input
num_patches = self.patchifier.num_patches
if self.do_mask_input:
self.masking = PatchTSTMasking(config)
else:
self.masking = nn.Identity()
self.encoder = PatchTSTEncoder(config, num_patches=num_patches)
self.post_init()
def forward(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTModelOutput]:
"""
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
future_values (`torch.BoolTensor` of shape `(batch_size, prediction_length, num_input_channels)`, *optional*):
Future target values associated with the `past_values`
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTModelOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = PatchTSTModel.from_pretrained("namctin/patchtst_etth1_pretrain")
>>> # during training, one provides both past and future values
>>> outputs = model(
... past_values=batch["past_values"],
... future_values=batch["future_values"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
if past_observed_mask is None:
past_observed_mask = torch.ones_like(past_values)
scaled_past_values, loc, scale = self.scaler(past_values, past_observed_mask)
patched_values = self.patchifier(scaled_past_values)
if self.do_mask_input:
masked_values, mask = self.masking(patched_values)
else:
masked_values, mask = (self.masking(patched_values), None)
encoder_output = self.encoder(patch_input=masked_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
if not return_dict:
outputs = (encoder_output.last_hidden_state, encoder_output.hidden_states, encoder_output.attentions)
outputs = outputs + (mask, loc, scale, patched_values)
return tuple((v for v in outputs if v is not None))
return PatchTSTModelOutput(last_hidden_state=encoder_output.last_hidden_state, hidden_states=encoder_output.hidden_states, attentions=encoder_output.attentions, mask=mask, loc=loc, scale=scale, patch_input=patched_values)
|
@auto_docstring
class PatchTSTModel(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSTModelOutput]:
'''
Parameters:
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
future_values (`torch.BoolTensor` of shape `(batch_size, prediction_length, num_input_channels)`, *optional*):
Future target values associated with the `past_values`
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers
output_attentions (`bool`, *optional*):
Whether or not to return the output attention of all layers
return_dict (`bool`, *optional*):
Whether or not to return a `ModelOutput` instead of a plain tuple.
Returns:
`PatchTSTModelOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False)
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import PatchTSTModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = PatchTSTModel.from_pretrained("namctin/patchtst_etth1_pretrain")
>>> # during training, one provides both past and future values
>>> outputs = model(
... past_values=batch["past_values"],
... future_values=batch["future_values"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```'''
pass
| 4
| 1
| 54
| 9
| 25
| 20
| 5
| 0.78
| 1
| 10
| 6
| 0
| 2
| 5
| 2
| 4
| 110
| 19
| 51
| 22
| 40
| 40
| 29
| 14
| 26
| 7
| 2
| 1
| 9
|
4,436
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTModelOutput
|
from ...utils import ModelOutput, auto_docstring, logging
import torch
from dataclasses import dataclass
from typing import Callable, Optional, Union
@dataclass
@auto_docstring(custom_intro="\n Base class for model's outputs, with potential hidden states.\n ")
class PatchTSTModelOutput(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, num_channels, num_patches, d_model)`. Hidden-states of
the model at the output of each layer plus the optional initial embedding outputs.
mask (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`, *optional*):
Bool masked tensor indicating which patches are masked
loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length
scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length
patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
Patched input to the Transformer
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
mask: Optional[torch.FloatTensor] = None
loc: Optional[torch.FloatTensor] = None
scale: Optional[torch.FloatTensor] = None
patch_input: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring(custom_intro="\n Base class for model's outputs, with potential hidden states.\n ")
class PatchTSTModelOutput(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, d_model)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, num_channels, num_patches, d_model)`. Hidden-states of
the model at the output of each layer plus the optional initial embedding outputs.
mask (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`, *optional*):
Bool masked tensor indicating which patches are masked
loc (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length
scale (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*):
Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length
patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`):
Patched input to the Transformer
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 2.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 2
| 8
| 8
| 7
| 18
| 8
| 8
| 7
| 0
| 1
| 0
| 0
|
4,437
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTNOPScaler
|
import torch
from torch import nn
from .configuration_patchtst import PatchTSTConfig
from typing import Callable, Optional, Union
class PatchTSTNOPScaler(nn.Module):
"""
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
return (data, loc, scale)
|
class PatchTSTNOPScaler(nn.Module):
'''
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
'''
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3
| 2
| 10
| 0
| 5
| 5
| 2
| 1.09
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 25
| 2
| 11
| 9
| 6
| 12
| 9
| 7
| 6
| 3
| 1
| 0
| 4
|
4,438
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTPatchify
|
import torch
from torch import nn
from .configuration_patchtst import PatchTSTConfig
class PatchTSTPatchify(nn.Module):
"""
A class to patchify the time series sequence into different patches
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.sequence_length = config.context_length
self.patch_length = config.patch_length
self.patch_stride = config.patch_stride
if self.sequence_length <= self.patch_length:
raise ValueError(f'Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})')
self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1
new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1)
self.sequence_start = self.sequence_length - new_sequence_length
def forward(self, past_values: torch.Tensor):
"""
Parameters:
past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*):
Input for patchification
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
"""
sequence_length = past_values.shape[-2]
if sequence_length != self.sequence_length:
raise ValueError(f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length}).")
output = past_values[:, self.sequence_start:, :]
output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride)
output = output.transpose(-2, -3).contiguous()
return output
|
class PatchTSTPatchify(nn.Module):
'''
A class to patchify the time series sequence into different patches
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
'''
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, past_values: torch.Tensor):
'''
Parameters:
past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*):
Input for patchification
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
'''
pass
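A standalone sketch of the unfold step above with illustrative sizes (context_length=512, patch_length=16, patch_stride=8), written in plain `torch` so the shape bookkeeping is easy to follow.

```python
import torch

batch_size, sequence_length, num_channels = 2, 512, 6
patch_length, patch_stride = 16, 8

past_values = torch.randn(batch_size, sequence_length, num_channels)

num_patches = (sequence_length - patch_length) // patch_stride + 1          # 63
sequence_start = sequence_length - (patch_length + patch_stride * (num_patches - 1))

output = past_values[:, sequence_start:, :]                                  # (2, 512, 6)
output = output.unfold(dimension=-2, size=patch_length, step=patch_stride)   # (2, 63, 6, 16)
output = output.transpose(-2, -3).contiguous()                               # (2, 6, 63, 16)
print(output.shape)
```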
| 3
| 2
| 19
| 2
| 11
| 6
| 2
| 0.7
| 1
| 4
| 1
| 0
| 2
| 5
| 2
| 12
| 46
| 7
| 23
| 11
| 20
| 16
| 19
| 11
| 16
| 2
| 1
| 1
| 4
|
4,439
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTPositionalEncoding
|
import math
import torch
from .configuration_patchtst import PatchTSTConfig
from torch import nn
class PatchTSTPositionalEncoding(nn.Module):
"""
Class for positional encoding
"""
def __init__(self, config: PatchTSTConfig, num_patches: int):
super().__init__()
self.use_cls_token = config.use_cls_token
self.num_input_channels = config.num_input_channels
if config.use_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, 1, config.d_model))
num_patches += 1
self.position_enc = self._init_pe(config, num_patches)
self.positional_dropout = nn.Dropout(config.positional_dropout) if config.positional_dropout > 0 else nn.Identity()
@staticmethod
def _init_pe(config: PatchTSTConfig, num_patches: int) -> nn.Parameter:
if config.positional_encoding_type == 'random':
position_enc = nn.Parameter(torch.randn(num_patches, config.d_model), requires_grad=True)
elif config.positional_encoding_type == 'sincos':
position_enc = torch.zeros(num_patches, config.d_model)
position = torch.arange(0, num_patches).unsqueeze(1)
div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model))
position_enc[:, 0::2] = torch.sin(position * div_term)
position_enc[:, 1::2] = torch.cos(position * div_term)
position_enc = position_enc - position_enc.mean()
position_enc = position_enc / (position_enc.std() * 10)
position_enc = nn.Parameter(position_enc, requires_grad=False)
else:
raise ValueError(f"{config.positional_encoding_type} is not a valid positional encoder. Available types are 'random' and 'sincos'.")
return position_enc
def forward(self, patch_input: torch.Tensor):
if self.use_cls_token:
patch_input = self.positional_dropout(patch_input + self.position_enc[1:, :])
cls_token = self.cls_token + self.position_enc[:1, :]
cls_tokens = cls_token.expand(patch_input.shape[0], self.num_input_channels, -1, -1)
hidden_state = torch.cat((cls_tokens, patch_input), dim=2)
else:
hidden_state = self.positional_dropout(patch_input + self.position_enc)
return hidden_state
|
class PatchTSTPositionalEncoding(nn.Module):
'''
Class for positional encoding
'''
def __init__(self, config: PatchTSTConfig, num_patches: int):
pass
@staticmethod
def _init_pe(config: PatchTSTConfig, num_patches: int) -> nn.Parameter:
pass
def forward(self, patch_input: torch.Tensor):
pass
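A standalone replay of the `sincos` table construction from `_init_pe` above, with a deliberately tiny `d_model` so the normalization (roughly zero mean, standard deviation scaled down to 0.1) is easy to inspect.

```python
import math
import torch

num_patches, d_model = 8, 4
position_enc = torch.zeros(num_patches, d_model)
position = torch.arange(0, num_patches).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
position_enc[:, 0::2] = torch.sin(position * div_term)
position_enc[:, 1::2] = torch.cos(position * div_term)
position_enc = (position_enc - position_enc.mean()) / (position_enc.std() * 10)
print(position_enc.shape, float(position_enc.std()))  # torch.Size([8, 4]) ~0.1
```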
| 5
| 1
| 15
| 0
| 12
| 3
| 3
| 0.31
| 1
| 5
| 1
| 0
| 2
| 5
| 3
| 13
| 54
| 3
| 39
| 16
| 34
| 12
| 31
| 15
| 27
| 3
| 1
| 1
| 8
|
4,440
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTPreTrainedModel
|
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_patchtst import PatchTSTConfig
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
@auto_docstring
class PatchTSTPreTrainedModel(PreTrainedModel):
config: PatchTSTConfig
base_model_prefix = 'model'
main_input_name = 'past_values'
supports_gradient_checkpointing = False
def _init_weights(self, module: nn.Module):
"""
Initialize weights
"""
if isinstance(module, PatchTSTPositionalEncoding):
num_patches = (max(self.config.context_length, self.config.patch_length) - self.config.patch_length) // self.config.patch_stride + 1
if self.config.use_cls_token:
nn.init.normal_(module.cls_token, std=0.02)
num_patches += 1
module.position_enc = module._init_pe(self.config, num_patches)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, PatchTSTBatchNorm):
module.batchnorm.bias.data.zero_()
module.batchnorm.weight.data.fill_(1.0)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.bias is not None:
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, PatchTSTEncoder):
module.gradient_checkpointing = value
|
@auto_docstring
class PatchTSTPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''
Initialize weights
'''
pass
def _set_gradient_checkpointing(self, module, value=False):
pass
| 4
| 1
| 12
| 0
| 10
| 3
| 5
| 0.21
| 1
| 3
| 3
| 6
| 2
| 0
| 2
| 2
| 31
| 2
| 24
| 7
| 21
| 5
| 21
| 7
| 18
| 8
| 1
| 2
| 10
|
4,441
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTPredictionHead
|
from ...utils import ModelOutput, auto_docstring, logging
import torch
from torch import nn
from .configuration_patchtst import PatchTSTConfig
@auto_docstring(custom_intro='\n The PatchTST for regression Model.\n ')
class PatchTSTPredictionHead(nn.Module):
def __init__(self, config: PatchTSTConfig, num_patches: int, distribution_output=None):
"""
num_patches (`int`):
The number of patches in the input sequence.
distribution_output (`DistributionOutput`, *optional*):
The distribution output layer for probabilistic forecasting. If None, a linear output layer is used.
"""
super().__init__()
self.share_projection = config.share_projection
self.num_input_channels = config.num_input_channels
self.use_cls_token = config.use_cls_token
self.pooling_type = config.pooling_type
if self.pooling_type or self.use_cls_token:
head_dim = config.d_model
else:
head_dim = config.d_model * num_patches
if not self.share_projection:
self.projections = nn.ModuleList()
self.dropouts = nn.ModuleList()
self.flattens = nn.ModuleList()
for i in range(self.num_input_channels):
self.flattens.append(nn.Flatten(start_dim=2))
if distribution_output is None:
self.projections.append(nn.Linear(head_dim, config.prediction_length))
else:
self.projections.append(distribution_output.get_parameter_projection(head_dim))
self.dropouts.append(nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity())
else:
self.flatten = nn.Flatten(start_dim=2)
if distribution_output is None:
self.projection = nn.Linear(head_dim, config.prediction_length)
else:
self.projection = distribution_output.get_parameter_projection(head_dim)
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
def forward(self, embedding: torch.Tensor):
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, forecast_len, num_channels)`
"""
if self.use_cls_token:
pooled_embedding = embedding[:, :, 0, :]
elif self.pooling_type == 'mean':
pooled_embedding = embedding.mean(dim=2)
elif self.pooling_type == 'max':
pooled_embedding = embedding.max(dim=2).values
else:
pooled_embedding = embedding
if not self.share_projection:
output = []
for i in range(self.num_input_channels):
pooled_embedding = self.flattens[i](pooled_embedding[:, i, :])
pooled_embedding = self.dropouts[i](pooled_embedding)
pooled_embedding = self.projections[i](pooled_embedding)
output.append(pooled_embedding)
output = torch.stack(output, dim=1)
else:
pooled_embedding = self.flatten(pooled_embedding)
pooled_embedding = self.dropout(pooled_embedding)
output = self.projection(pooled_embedding)
if isinstance(output, tuple):
output = tuple((z.transpose(2, 1) for z in output))
else:
output = output.transpose(2, 1)
return output
|
@auto_docstring(custom_intro='\n The PatchTST for regression Model.\n ')
class PatchTSTPredictionHead(nn.Module):
def __init__(self, config: PatchTSTConfig, num_patches: int, distribution_output=None):
'''
num_patches (`int`):
The number of patches in the input sequence.
distribution_output (`DistributionOutput`, *optional*):
The distribution output layer for probabilistic forecasting. If None, a linear output layer is used.
'''
pass
def forward(self, embedding: torch.Tensor):
'''
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, forecast_len, num_channels)`
'''
pass
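A shape-only sketch of the shared-projection path above with mean pooling, using plain `torch` modules and illustrative sizes (d_model=128, prediction_length=96); the per-channel path simply repeats the same flatten/dropout/linear combination for each input channel.

```python
import torch
from torch import nn

bs, num_channels, num_patches, d_model, prediction_length = 2, 6, 63, 128, 96
embedding = torch.randn(bs, num_channels, num_patches, d_model)

pooled = embedding.mean(dim=2)                                 # (2, 6, 128), pooling_type == "mean"
flattened = nn.Flatten(start_dim=2)(pooled)                    # (2, 6, 128), a no-op after pooling
projected = nn.Linear(d_model, prediction_length)(flattened)   # (2, 6, 96)
forecast = projected.transpose(2, 1)                           # (2, 96, 6): (bs, forecast_len, num_channels)
print(forecast.shape)
```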
| 4
| 2
| 43
| 3
| 28
| 14
| 8
| 0.48
| 1
| 5
| 1
| 0
| 2
| 10
| 2
| 12
| 88
| 6
| 56
| 18
| 53
| 27
| 47
| 18
| 44
| 8
| 1
| 3
| 15
|
4,442
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTRegressionHead
|
import torch
from torch import nn
from .configuration_patchtst import PatchTSTConfig
class PatchTSTRegressionHead(nn.Module):
"""
Regression head
"""
def __init__(self, config: PatchTSTConfig, distribution_output=None):
super().__init__()
self.y_range = config.output_range
self.use_cls_token = config.use_cls_token
self.pooling_type = config.pooling_type
self.distribution_output = distribution_output
head_dim = config.num_input_channels * config.d_model
self.flatten = nn.Flatten(start_dim=1)
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
if distribution_output is None:
self.projection = nn.Linear(head_dim, config.num_targets)
else:
self.projection = distribution_output.get_parameter_projection(head_dim)
def forward(self, embedding: torch.Tensor):
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, output_dim)`
"""
if self.use_cls_token:
pooled_embedding = embedding[:, :, 0, :]
elif self.pooling_type == 'mean':
pooled_embedding = embedding.mean(dim=2)
elif self.pooling_type == 'max':
pooled_embedding = embedding.max(dim=2).values
else:
raise ValueError(f'pooling operator {self.pooling_type} is not implemented yet')
pooled_embedding = self.dropout(self.flatten(pooled_embedding))
output = self.projection(pooled_embedding)
if (self.distribution_output is None) & (self.y_range is not None):
output = torch.sigmoid(output) * (self.y_range[1] - self.y_range[0]) + self.y_range[0]
return output
|
class PatchTSTRegressionHead(nn.Module):
'''
Regression head
'''
def __init__(self, config: PatchTSTConfig, distribution_output=None):
pass
def forward(self, embedding: torch.Tensor):
'''
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, output_dim)`
'''
pass
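A shape sketch of the non-distributional path above with mean pooling and an `output_range`, using illustrative sizes; the sigmoid rescaling at the end is what `y_range` controls.

```python
import torch
from torch import nn

bs, num_channels, num_patches, d_model, num_targets = 2, 6, 63, 128, 2
y_range = (0.0, 10.0)

embedding = torch.randn(bs, num_channels, num_patches, d_model)
pooled = embedding.mean(dim=2)                                       # (2, 6, 128)
flattened = nn.Flatten(start_dim=1)(pooled)                          # (2, 768): channels and d_model merged
output = nn.Linear(num_channels * d_model, num_targets)(flattened)   # (2, 2)
output = torch.sigmoid(output) * (y_range[1] - y_range[0]) + y_range[0]  # squashed into [0, 10]
print(output.shape, bool((output >= 0).all()))
```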
| 3
| 2
| 24
| 2
| 14
| 9
| 4
| 0.71
| 1
| 4
| 1
| 0
| 2
| 7
| 2
| 12
| 53
| 6
| 28
| 13
| 25
| 20
| 24
| 13
| 21
| 5
| 1
| 1
| 8
|
4,443
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTScaler
|
import torch
from torch import nn
from .configuration_patchtst import PatchTSTConfig
class PatchTSTScaler(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
if config.scaling == 'mean' or config.scaling is True:
self.scaler = PatchTSTMeanScaler(config)
elif config.scaling == 'std':
self.scaler = PatchTSTStdScaler(config)
else:
self.scaler = PatchTSTNOPScaler(config)
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Input for scaler calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
data, loc, scale = self.scaler(data, observed_indicator)
return (data, loc, scale)
|
class PatchTSTScaler(nn.Module):
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Input for scaler calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3
| 1
| 12
| 0
| 7
| 6
| 2
| 0.79
| 1
| 6
| 4
| 0
| 2
| 1
| 2
| 12
| 26
| 1
| 14
| 7
| 9
| 11
| 10
| 5
| 7
| 3
| 1
| 1
| 4
|
4,444
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtst/modeling_patchtst.py
|
transformers.models.patchtst.modeling_patchtst.PatchTSTStdScaler
|
import torch
from .configuration_patchtst import PatchTSTConfig
from torch import nn
class PatchTSTStdScaler(nn.Module):
"""
Standardizes features by computing the mean and standard deviation along the first dimension, then normalizes the
data by subtracting the mean and dividing by the standard deviation.
"""
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-05
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
denominator = denominator.clamp_min(1.0)
loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
scale = torch.sqrt(variance + self.minimum_scale)
return ((data - loc) / scale, loc, scale)
|
class PatchTSTStdScaler(nn.Module):
'''
Standardizes features by computing the mean and standard deviation along the first dimension, then normalizes the
data by subtracting the mean and dividing by the standard deviation.
'''
def __init__(self, config: PatchTSTConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
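A worked numeric sketch of the standardization math above (dim=1, keepdim=True, minimum_scale=1e-5), with the last time step marked as missing so it contributes neither to the mean nor to the variance.

```python
import torch

data = torch.tensor([[[1.0], [2.0], [3.0], [0.0]]])         # (batch=1, seq=4, channels=1)
observed = torch.tensor([[[1.0], [1.0], [1.0], [0.0]]])      # last step is missing

denominator = observed.sum(1, keepdim=True).clamp_min(1.0)   # 3 observed steps
loc = (data * observed).sum(1, keepdim=True) / denominator   # (1 + 2 + 3) / 3 = 2.0
variance = (((data - loc) * observed) ** 2).sum(1, keepdim=True) / denominator  # 2/3
scale = torch.sqrt(variance + 1e-5)                          # ≈ 0.8165
print(loc.squeeze(), scale.squeeze(), ((data - loc) / scale).squeeze())
```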
| 3
| 2
| 13
| 1
| 7
| 6
| 3
| 1
| 1
| 3
| 1
| 0
| 2
| 3
| 2
| 12
| 33
| 3
| 15
| 12
| 10
| 15
| 13
| 10
| 10
| 4
| 1
| 0
| 5
|
4,445
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/configuration_pegasus.py
|
transformers.models.pegasus.configuration_pegasus.PegasusConfig
|
from ...configuration_utils import PretrainedConfig
class PegasusConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate an
PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the PEGASUS
[google/pegasus-large](https://huggingface.co/google/pegasus-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`PegasusModel`] or [`TFPegasusModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 1):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Example:
```python
>>> from transformers import PegasusConfig, PegasusModel
>>> # Initializing a PEGASUS google/pegasus-large style configuration
>>> configuration = PegasusConfig()
>>> # Initializing a model (with random weights) from the google/pegasus-large style configuration
>>> model = PegasusModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'pegasus'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding
super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model
|
class PegasusConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate an
PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the PEGASUS
[google/pegasus-large](https://huggingface.co/google/pegasus-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`PegasusModel`] or [`TFPegasusModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 1):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Example:
```python
>>> from transformers import PegasusConfig, PegasusModel
>>> # Initializing a PEGASUS google/pegasus-large style configuration
>>> configuration = PegasusConfig()
>>> # Initializing a model (with random weights) from the google/pegasus-large style configuration
>>> model = PegasusModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
pass
@property
def num_attention_heads(self) -> int:
pass
@property
def hidden_size(self) -> int:
pass
| 6
| 1
| 19
| 0
| 19
| 0
| 1
| 0.98
| 1
| 2
| 0
| 0
| 3
| 19
| 3
| 3
| 138
| 12
| 64
| 54
| 32
| 63
| 29
| 26
| 25
| 1
| 1
| 0
| 3
|
4,446
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusAttention
|
from ...utils.deprecation import deprecate_kwarg
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...processing_utils import Unpack
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from typing import Callable, Optional, Union
from .configuration_pegasus import PegasusConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
class PegasusAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PegasusConfig]=None, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights)
|
class PegasusAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PegasusConfig]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4
| 2
| 50
| 7
| 35
| 8
| 5
| 0.24
| 1
| 7
| 1
| 0
| 3
| 12
| 3
| 13
| 156
| 23
| 107
| 44
| 86
| 26
| 68
| 27
| 64
| 12
| 1
| 2
| 15
|
4,447
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusDecoder
|
import torch
from .configuration_pegasus import PegasusConfig
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import math
class PegasusDecoder(PegasusPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding]=None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(config.max_position_embeddings, config.d_model, self.padding_idx)
self.layers = nn.ModuleList([PegasusDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f'Setting `config.max_position_embeddings={new_num_position_embeddings}`...')
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusSinusoidalPositionalEmbedding(self.config.max_position_embeddings, self.config.d_model, self.padding_idx)
self.embed_positions._init_weight()
self.embed_positions.to(self.device)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')
elif input_ids is not None:
input = input_ids
input_shape = input.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input)
inputs_embeds = inputs_embeds * self.embed_scale
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None else DynamicCache(config=self.config)
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
batch_size, seq_length = inputs_embeds.size()[:-1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)
if attention_mask is None and (not is_torchdynamo_compiling()):
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache)
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)
positions = self.embed_positions((batch_size, seq_length), past_key_values_length, position_ids=cache_position)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
|
class PegasusDecoder(PegasusPreTrainedModel):
'''
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
'''
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding]=None):
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings matrix
'''
pass
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
'''
pass
| 5
| 4
| 45
| 6
| 26
| 13
| 7
| 0.54
| 1
| 11
| 4
| 0
| 6
| 10
| 6
| 7
| 281
| 43
| 155
| 45
| 134
| 83
| 84
| 31
| 77
| 37
| 2
| 3
| 44
|
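One implementation detail in the decoder loop above is LayerDrop: during training, each decoder layer is skipped with probability `config.decoder_layerdrop` by drawing a random number per layer. A stripped-down, hypothetical version of that control flow:

```python
import torch
from torch import nn

def layerdrop_pass(layers: nn.ModuleList, hidden_states: torch.Tensor, layerdrop: float, training: bool) -> torch.Tensor:
    # During training, skip each layer with probability `layerdrop`;
    # at inference time every layer always runs.
    for layer in layers:
        if training and torch.rand([]) < layerdrop:
            continue  # drop this layer for the current forward pass
        hidden_states = layer(hidden_states)
    return hidden_states
```

The real loop additionally threads attention masks, head masks and the cache through each layer; this sketch keeps only the skip logic.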
4,448
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusDecoderLayer
|
from typing import Callable, Optional, Union
from .configuration_pegasus import PegasusConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils.deprecation import deprecate_kwarg
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
import torch
from ...activations import ACT2FN
class PegasusDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: PegasusConfig, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, layer_idx=layer_idx)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = PegasusAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
|
class PegasusDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: PegasusConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
'''
pass
| 4
| 1
| 58
| 6
| 40
| 13
| 4
| 0.31
| 1
| 4
| 1
| 0
| 2
| 11
| 2
| 12
| 118
| 12
| 81
| 32
| 67
| 25
| 44
| 21
| 41
| 6
| 1
| 1
| 7
|
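The decoder layer above is a pre-norm transformer block: each sub-block applies LayerNorm first, then the sub-layer (self-attention, cross-attention or feed-forward), then dropout, then a residual add. A minimal sketch of that per-sub-block pattern, using hypothetical names:

```python
import torch
from torch import nn

class PreNormResidual(nn.Module):
    # Hypothetical wrapper illustrating the pattern used in PegasusDecoderLayer:
    # LayerNorm -> sublayer -> dropout -> residual add.
    def __init__(self, dim: int, sublayer: nn.Module, dropout: float = 0.1):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.sublayer = sublayer
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + self.dropout(self.sublayer(self.norm(x)))
```

For example, `PreNormResidual(512, nn.Linear(512, 512))(torch.randn(2, 7, 512))` returns a tensor of the same shape.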
4,449
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusDecoderWrapper
|
class PegasusDecoderWrapper(PegasusPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = PegasusDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
|
class PegasusDecoderWrapper(PegasusPreTrainedModel):
'''
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
'''
def __init__(self, config):
pass
def forward(self, *args, **kwargs):
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 1
| 0.67
| 1
| 2
| 1
| 0
| 2
| 1
| 2
| 3
| 12
| 2
| 6
| 4
| 3
| 4
| 6
| 4
| 3
| 1
| 2
| 0
| 2
|
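The wrapper above exists purely so that parameter names line up: the standalone causal LM keeps its decoder under `model.decoder`, which matches the key layout of the full encoder-decoder checkpoint and lets pretrained weights load without remapping. A generic, hypothetical illustration of the same trick:

```python
from torch import nn

class DecoderOnlyWrapper(nn.Module):
    # Hypothetical sketch: nesting the decoder under `self.decoder` keeps
    # state_dict keys identical to the seq2seq model's `model.decoder.*` names.
    def __init__(self, decoder: nn.Module):
        super().__init__()
        self.decoder = decoder

    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)
```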
4,450
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusEncoder
|
from torch import nn
from typing import Callable, Optional, Union
from .configuration_pegasus import PegasusConfig
import torch
import math
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
class PegasusEncoder(PegasusPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`PegasusEncoderLayer`].
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding]=None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(config.max_position_embeddings, embed_dim, self.padding_idx)
self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f'Setting `config.max_position_embeddings={new_num_position_embeddings}`...')
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusSinusoidalPositionalEmbedding(self.config.max_position_embeddings, self.config.d_model, self.padding_idx)
self.embed_positions._init_weight()
self.embed_positions.to(self.device)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
attention_mask = self._update_full_mask(attention_mask, inputs_embeds)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
class PegasusEncoder(PegasusPreTrainedModel):
'''
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`PegasusEncoderLayer`].
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
'''
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding]=None):
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings matrix
'''
pass
def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 5
| 4
| 47
| 7
| 27
| 13
| 7
| 0.53
| 1
| 10
| 4
| 0
| 4
| 10
| 4
| 5
| 199
| 32
| 110
| 34
| 96
| 58
| 69
| 25
| 64
| 24
| 2
| 3
| 29
|
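Because the encoder's position embeddings are sinusoidal rather than learned, `resize_position_embeddings` can simply rebuild the table at the new length instead of training new vectors. The following is a minimal sketch of the classic sin/cos table; the exact column layout used by `PegasusSinusoidalPositionalEmbedding` may differ, so treat this as illustrative only (it assumes an even `dim`):

```python
import math
import torch

def sinusoidal_position_table(num_positions: int, dim: int) -> torch.Tensor:
    # Deterministic positional encodings: sine on even columns, cosine on odd columns.
    position = torch.arange(num_positions, dtype=torch.float32).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, dim, 2, dtype=torch.float32) * (-math.log(10000.0) / dim))
    table = torch.zeros(num_positions, dim)
    table[:, 0::2] = torch.sin(position * div_term)
    table[:, 1::2] = torch.cos(position * div_term)
    return table
```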
4,451
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusEncoderLayer
|
from torch import nn
from ...activations import ACT2FN
from .configuration_pegasus import PegasusConfig
from ...modeling_layers import GradientCheckpointingLayer
import torch
class PegasusEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16:
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return (hidden_states, attn_weights)
|
class PegasusEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: PegasusConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 33
| 3
| 25
| 6
| 2
| 0.22
| 1
| 4
| 1
| 0
| 2
| 9
| 2
| 12
| 68
| 7
| 50
| 22
| 41
| 11
| 32
| 16
| 29
| 3
| 1
| 1
| 4
|
4,452
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusForCausalLM
|
import torch
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
import copy
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from torch.nn import CrossEntropyLoss
class PegasusForCausalLM(PegasusPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = PegasusDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.model.decoder.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
"""
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, PegasusForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusForCausalLM.from_pretrained("google/pegasus-large", add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
>>> list(logits.shape) == expected_shape
True
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
class PegasusForCausalLM(PegasusPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings matrix
'''
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
'''
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, PegasusForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusForCausalLM.from_pretrained("google/pegasus-large", add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
>>> list(logits.shape) == expected_shape
True
```'''
pass
| 10
| 3
| 18
| 2
| 8
| 8
| 2
| 0.97
| 2
| 7
| 2
| 0
| 10
| 2
| 11
| 12
| 209
| 36
| 88
| 39
| 59
| 85
| 47
| 22
| 35
| 7
| 2
| 1
| 18
|
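The loss at the end of `PegasusForCausalLM.forward` flattens logits and labels over the batch and sequence dimensions, and label positions set to -100 are excluded because that is `CrossEntropyLoss`'s default `ignore_index`. A small self-contained illustration with toy shapes (hypothetical values):

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 8
logits = torch.randn(2, 5, vocab_size)         # (batch, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (2, 5))  # (batch, seq_len)
labels[0, :2] = -100                           # e.g. prompt or padding tokens excluded from the loss

loss = CrossEntropyLoss()(logits.view(-1, vocab_size), labels.view(-1))
print(loss.item())
```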
4,453
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusForConditionalGeneration
|
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from .configuration_pegasus import PegasusConfig
from ...generation import GenerationMixin
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
@auto_docstring(custom_intro='\n The PEGASUS Model with a language modeling head. Can be used for summarization.\n ')
class PegasusForConditionalGeneration(PegasusPreTrainedModel, GenerationMixin):
base_model_prefix = 'model'
_keys_to_ignore_on_load_missing = ['final_logits_bias']
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight']
def __init__(self, config: PegasusConfig):
super().__init__(config)
self.model = PegasusModel(config)
self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self._resize_final_logits_bias(new_embeddings.weight.shape[0])
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer('final_logits_bias', new_bias)
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqLMOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Summarization:
```python
>>> from transformers import AutoTokenizer, PegasusForConditionalGeneration
>>> model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
>>> # Generate Summary
>>> summary_ids = model.generate(inputs["input_ids"])
>>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"California's largest electricity provider has turned off power to hundreds of thousands of customers."
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
|
@auto_docstring(custom_intro='\n The PEGASUS Model with a language modeling head. Can be used for summarization.\n ')
class PegasusForConditionalGeneration(PegasusPreTrainedModel, GenerationMixin):
def __init__(self, config: PegasusConfig):
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
pass
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
def get_position_embeddings(self) -> tuple[nn.Embedding]:
'''
Returns the position embeddings matrix
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqLMOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Summarization:
```python
>>> from transformers import AutoTokenizer, PegasusForConditionalGeneration
>>> model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
>>> # Generate Summary
>>> summary_ids = model.generate(inputs["input_ids"])
>>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"California's largest electricity provider has turned off power to hundreds of thousands of customers."
```
'''
pass
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
pass
| 12
| 3
| 12
| 1
| 9
| 2
| 2
| 0.2
| 2
| 8
| 3
| 0
| 11
| 3
| 12
| 13
| 160
| 21
| 116
| 52
| 79
| 23
| 60
| 29
| 47
| 8
| 2
| 2
| 21
|
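The `PegasusForConditionalGeneration` record above builds `decoder_input_ids` from `labels` via `shift_tokens_right` when no decoder inputs are given. A minimal standalone sketch of that shifting convention (the helper mirrors the BART/Pegasus-style behaviour; the token ids below are made up for illustration):
```python
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    # Move every label one position to the right and prepend the decoder start token.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # Replace any -100 label padding with the pad token so the ids stay embeddable.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

# Toy labels for one sequence; -100 marks positions ignored by the loss.
labels = torch.tensor([[45, 67, 89, 1, -100]])
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0)
print(decoder_input_ids)  # tensor([[ 0, 45, 67, 89,  1]])
```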
4,454
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusModel
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from torch import nn
from .configuration_pegasus import PegasusConfig
@auto_docstring
class PegasusModel(PegasusPreTrainedModel):
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight']
def __init__(self, config: PegasusConfig):
super().__init__(config)
padding_idx, vocab_size = (config.pad_token_id, config.vocab_size)
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = PegasusEncoder(config, self.shared)
self.decoder = PegasusDecoder(config, self.shared)
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.encoder.resize_position_embeddings(new_num_position_embeddings)
self.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqModelOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Example:
```python
>>> from transformers import AutoTokenizer, PegasusModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-large")
>>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
>>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 1024]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
|
@auto_docstring
class PegasusModel(PegasusPreTrainedModel):
def __init__(self, config: PegasusConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_encoder(self):
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
def get_position_embeddings(self) -> tuple[nn.Embedding]:
'''
Returns the position embeddings matrix
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqModelOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Example:
```python
>>> from transformers import AutoTokenizer, PegasusModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-large")
>>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
>>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 1024]
```'''
pass
| 10
| 3
| 17
| 2
| 11
| 4
| 2
| 0.33
| 1
| 9
| 5
| 0
| 8
| 3
| 8
| 9
| 146
| 22
| 93
| 33
| 65
| 31
| 37
| 15
| 28
| 10
| 2
| 1
| 17
|
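In the `PegasusModel.forward` above, the encoder only runs when `encoder_outputs` is not supplied, so an encoder pass can be computed once and reused across decoder calls. A hedged usage sketch of that pattern (checkpoint name taken from the record's own example; this is an illustration, not a substitute for `generate`):
```python
import torch
from transformers import AutoTokenizer, PegasusModel

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
model = PegasusModel.from_pretrained("google/pegasus-large")

inputs = tokenizer("PEGASUS was pretrained with gap-sentence generation.", return_tensors="pt")
decoder_inputs = tokenizer("Summaries are", return_tensors="pt")

with torch.no_grad():
    # Run the encoder once and keep its output.
    encoder_outputs = model.get_encoder()(**inputs)
    # Reuse it: the forward pass above skips the encoder when encoder_outputs is given.
    outputs = model(
        attention_mask=inputs.attention_mask,
        decoder_input_ids=decoder_inputs.input_ids,
        encoder_outputs=encoder_outputs,
    )

print(outputs.last_hidden_state.shape)  # (1, decoder_sequence_length, 1024)
```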
4,455
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_pegasus import PegasusConfig
import torch
from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from torch import nn
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
@auto_docstring
class PegasusPreTrainedModel(PreTrainedModel):
config: PegasusConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, PegasusSinusoidalPositionalEmbedding):
module._init_weight()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache):
if self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
elif attention_mask is None:
attention_mask = make_flex_block_causal_mask(torch.ones(size=(input_tensor.shape[0], input_tensor.shape[1]), device=input_tensor.device))
return attention_mask
if 'flash' in self.config._attn_implementation:
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if 'flash' in self.config._attn_implementation:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
|
@auto_docstring
class PegasusPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
'''
pass
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
pass
| 8
| 1
| 12
| 0
| 12
| 0
| 6
| 0
| 1
| 1
| 1
| 6
| 1
| 0
| 1
| 1
| 17
| 1
| 16
| 6
| 14
| 0
| 14
| 6
| 12
| 6
| 1
| 2
| 6
|
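`_prepare_4d_causal_attention_mask_with_cache_position` above turns a 2D padding mask into the additive 4D mask the decoder consumes, offsetting the causal pattern by `cache_position`. A self-contained sketch of the same arithmetic on toy shapes (sizes and mask values are invented for illustration):
```python
import torch

batch_size, seq_len, target_len = 1, 3, 5
dtype = torch.float32
min_dtype = torch.finfo(dtype).min
cache_position = torch.arange(2, 2 + seq_len)      # the new query tokens sit at positions 2..4
attention_mask = torch.tensor([[1, 1, 1, 1, 0]])   # 2D padding mask: the last key position is padding

# Start from a fully masked (seq_len, target_len) grid, then open the causally visible positions.
causal = torch.full((seq_len, target_len), min_dtype, dtype=dtype)
causal = torch.triu(causal, diagonal=1)
causal = causal * (torch.arange(target_len) > cache_position.reshape(-1, 1))
causal = causal[None, None, :, :].expand(batch_size, 1, -1, -1)

# Fold in the padding mask: a causally visible but padded key gets re-masked.
padding = causal + attention_mask[:, None, None, :].to(dtype)
causal = causal.masked_fill(padding == 0, min_dtype)

print(causal[0, 0])  # 0.0 where a key may be attended, a large negative value where it is masked
```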
4,456
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/modeling_pegasus.py
|
transformers.models.pegasus.modeling_pegasus.PegasusSinusoidalPositionalEmbedding
|
from typing import Callable, Optional, Union
import numpy as np
from torch import nn
import torch
class PegasusSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None:
super().__init__(num_positions, embedding_dim)
def _init_weight(self):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = self.weight.shape
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False)
sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
self.weight = nn.Parameter(out, requires_grad=False)
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(position_ids)
|
class PegasusSinusoidalPositionalEmbedding(nn.Embedding):
'''This module produces sinusoidal positional embeddings of any length.'''
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None:
pass
def _init_weight(self):
'''
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
'''
pass
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
'''`input_ids_shape` is expected to be [bsz x seqlen].'''
pass
| 5
| 3
| 8
| 0
| 7
| 2
| 1
| 0.3
| 1
| 4
| 0
| 0
| 2
| 1
| 3
| 3
| 32
| 3
| 23
| 12
| 17
| 7
| 17
| 10
| 13
| 2
| 1
| 0
| 4
|
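`PegasusSinusoidalPositionalEmbedding._init_weight` above fills the embedding table with non-interleaved sinusoids: sines in the first half of each vector, cosines in the second half. A small numpy sketch of that construction (toy sizes; the real table is `max_position_embeddings x d_model`):
```python
import numpy as np

n_pos, dim = 6, 8  # toy sizes chosen for illustration

# Angle for position `pos` and feature pair `j // 2`.
position_enc = np.array(
    [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)

table = np.empty((n_pos, dim), dtype=np.float32)
sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1
table[:, :sentinel] = np.sin(position_enc[:, 0::2])  # sines fill the first half
table[:, sentinel:] = np.cos(position_enc[:, 1::2])  # cosines fill the second half

print(table.shape)  # (6, 8)
print(table[0])     # position 0: the sine half is 0.0, the cosine half is 1.0
```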
4,457
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/tokenization_pegasus.py
|
transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer
|
import sentencepiece as spm
from typing import Any, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils.import_utils import requires
from shutil import copyfile
import os
@requires(backends=('sentencepiece',))
class PegasusTokenizer(PreTrainedTokenizer):
"""
Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
The token used for masking single token values. This is the token used when training this model with masked
language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
Summarization](https://huggingface.co/papers/1912.08777).
mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
The token used for masking whole target sentences. This is the token used when training this model with gap
sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
Abstractive Summarization](https://huggingface.co/papers/1912.08777).
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
<unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
that uses the tokens 2 - 104 only for pretraining
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, pad_token='<pad>', eos_token='</s>', unk_token='<unk>', mask_token='<mask_2>', mask_token_sent='<mask_1>', additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
self.offset = offset
if additional_special_tokens is not None:
if not isinstance(additional_special_tokens, list):
raise TypeError(f'additional_special_tokens should be of type {type(list)}, but is {type(additional_special_tokens)}')
additional_special_tokens_extended = [mask_token_sent] + additional_special_tokens if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens
additional_special_tokens_extended += [f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)]
if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
raise ValueError(f'Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens_extended = []
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.mask_token_sent = mask_token_sent
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
_added_tokens_decoder = {0: AddedToken(str(pad_token), special=True), 1: AddedToken(str(eos_token), special=True)}
if self.mask_token_sent is not None:
_added_tokens_decoder[2] = AddedToken(mask_token_sent, special=True)
_added_tokens_decoder[3] = AddedToken(str(mask_token), special=True)
for i in range(2, self.offset):
_added_tokens_decoder[len(_added_tokens_decoder)] = AddedToken(f'<unk_{i}>', special=True)
self._added_tokens_decoder = kwargs.pop('added_tokens_decoder', {})
self._added_tokens_decoder.update(_added_tokens_decoder)
super().__init__(eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
@property
def vocab_size(self) -> int:
return len(self.sp_model) + self.offset
def get_vocab(self) -> dict[str, int]:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _tokenize(self, text: str) -> list[str]:
"""Take as input a string and return a list of strings (tokens) for words/sub-words"""
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token: str) -> int:
"""Converts a token (str) to an id using the vocab."""
sp_id = self.sp_model.piece_to_id(token)
return sp_id + self.offset
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) to a token (str) using the vocab."""
if index < self.offset:
return self.sp_model.IdToPiece(index)
token = self.sp_model.IdToPiece(index - self.offset)
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ''
for token in tokens:
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens) + token
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def num_special_tokens_to_add(self, pair=False):
"""Just EOS"""
return 1
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids)
all_special_ids.remove(self.unk_token_id)
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
"""Get list where entries are [1] if a token is [eos] or [pad] else 0."""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0)
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence:
- single sequence: `X </s>`
- pair of sequences: `A B </s>` (not intended use)
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
return token_ids_0 + token_ids_1 + [self.eos_token_id]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
|
@requires(backends=('sentencepiece',))
class PegasusTokenizer(PreTrainedTokenizer):
'''
Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
The token used for masking single token values. This is the token used when training this model with masked
language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
Summarization](https://huggingface.co/papers/1912.08777).
mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
The token used for masking whole target sentences. This is the token used when training this model with gap
sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
Abstractive Summarization](https://huggingface.co/papers/1912.08777).
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
<unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
that uses the tokens 2 - 104 only for pretraining
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
'''
def __init__(self, vocab_file, pad_token='<pad>', eos_token='</s>', unk_token='<unk>', mask_token='<mask_2>', mask_token_sent='<mask_1>', additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
pass
@property
def vocab_size(self) -> int:
pass
def get_vocab(self) -> dict[str, int]:
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
def _tokenize(self, text: str) -> list[str]:
'''Take as input a string and return a list of strings (tokens) for words/sub-words'''
pass
def _convert_token_to_id(self, token: str) -> int:
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index: int) -> str:
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (strings) into a single string.'''
pass
def num_special_tokens_to_add(self, pair=False):
'''Just EOS'''
pass
def _special_token_mask(self, seq):
pass
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
'''Get list where entries are [1] if a token is [eos] or [pad] else 0.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence:
- single sequence: `X </s>`
- pair of sequences: `A B </s>` (not intended use)
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 17
| 8
| 13
| 1
| 10
| 2
| 2
| 0.55
| 1
| 11
| 0
| 0
| 14
| 7
| 14
| 103
| 252
| 40
| 139
| 52
| 109
| 76
| 92
| 36
| 77
| 9
| 3
| 2
| 34
|
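The slow `PegasusTokenizer` above reserves the ids below `offset` (103 by default) for pad/eos/mask and the `<unk_2>`-style placeholders, maps every SentencePiece piece to `sp_id + offset`, and appends only a trailing `</s>` when building model inputs. A hedged usage sketch against the checkpoint already used in this file's examples (requires `sentencepiece`):
```python
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

print(tok.pad_token_id, tok.eos_token_id)  # 0 1 -> the low ids are reserved for special tokens
print(tok.offset)                          # 103

tokens = tok.tokenize("gap sentence generation")
print(tok.convert_tokens_to_ids(tokens))   # each regular piece lands at sp_model id + offset

# Only a trailing </s> is appended; PEGASUS never uses a BOS token.
print(tok.build_inputs_with_special_tokens([5, 6, 7]))  # [5, 6, 7, 1]
```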
4,458
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus/tokenization_pegasus_fast.py
|
transformers.models.pegasus.tokenization_pegasus_fast.PegasusTokenizerFast
|
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from shutil import copyfile
import os
from typing import Optional
class PegasusTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
The token used for masking single token values. This is the token used when training this model with masked
language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
Summarization](https://huggingface.co/papers/1912.08777).
mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
The token used for masking whole target sentences. This is the token used when training this model with gap
sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
Abstractive Summarization](https://huggingface.co/papers/1912.08777).
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
<unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
that uses the tokens 2 - 104 only for pretraining
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = PegasusTokenizer
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file=None, tokenizer_file=None, pad_token='<pad>', eos_token='</s>', unk_token='<unk>', mask_token='<mask_2>', mask_token_sent='<mask_1>', additional_special_tokens=None, offset=103, **kwargs):
self.offset = offset
if additional_special_tokens is not None:
if not isinstance(additional_special_tokens, list):
raise TypeError(f'additional_special_tokens should be of type {type(list)}, but is {type(additional_special_tokens)}')
additional_special_tokens_extended = [mask_token_sent] + additional_special_tokens if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens
additional_special_tokens_extended += [f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)]
if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
raise ValueError(f'Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
from_slow = kwargs.pop('from_slow', None)
from_slow = from_slow or str(pad_token) != '<pad>' or str(eos_token) != '</s>' or (str(unk_token) != '<unk>')
kwargs.pop('added_tokens_decoder', {})
super().__init__(vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, from_slow=from_slow, **kwargs)
self.vocab_file = vocab_file
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids)
all_special_ids.remove(self.unk_token_id)
if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
raise ValueError(f'There should be 3 special tokens: mask_token, pad_token, and eos_token + {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}')
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
"""Get list where entries are [1] if a token is [eos] or [pad] else 0."""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0)
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
"""
Build model inputs from a sequence by adding eos to the end. No bos token is added to the front.
- single sequence: `X </s>`
- pair of sequences: `A B </s>` (not intended use)
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
return token_ids_0 + token_ids_1 + [self.eos_token_id]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
|
class PegasusTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
The token used for masking single token values. This is the token used when training this model with masked
language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
Summarization](https://huggingface.co/papers/1912.08777).
mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
The token used for masking whole target sentences. This is the token used when training this model with gap
sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
Abstractive Summarization](https://huggingface.co/papers/1912.08777).
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
<unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
that uses the tokens 2 - 104 only for pretraining
'''
def __init__(self, vocab_file=None, tokenizer_file=None, pad_token='<pad>', eos_token='</s>', unk_token='<unk>', mask_token='<mask_2>', mask_token_sent='<mask_1>', additional_special_tokens=None, offset=103, **kwargs):
pass
def _special_token_mask(self, seq):
pass
def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:
'''Get list where entries are [1] if a token is [eos] or [pad] else 0.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
'''
Build model inputs from a sequence by adding eos to the end. No bos token is added to the front.
- single sequence: `X </s>`
- pair of sequences: `A B </s>` (not intended use)
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 6
| 3
| 21
| 2
| 16
| 3
| 4
| 0.57
| 1
| 10
| 0
| 0
| 6
| 2
| 6
| 94
| 178
| 27
| 98
| 31
| 76
| 56
| 48
| 16
| 41
| 6
| 3
| 2
| 21
|
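Both tokenizer classes implement `get_special_tokens_mask` the same way: every special id except `<unk>` is flagged with 1, and a trailing 1 accounts for the `</s>` that `build_inputs_with_special_tokens` appends. A pure-Python sketch of that logic, decoupled from any checkpoint (the special-token ids below are hypothetical):
```python
from typing import Optional

# Hypothetical ids for illustration: 0 = pad, 1 = eos, 2 = unk, 3 = mask.
ALL_SPECIAL_IDS = {0, 1, 2, 3}
UNK_TOKEN_ID = 2

def special_token_mask(seq: list[int]) -> list[int]:
    # <unk> is deliberately treated as a regular token, mirroring _special_token_mask above.
    special = ALL_SPECIAL_IDS - {UNK_TOKEN_ID}
    return [1 if x in special else 0 for x in seq]

def get_special_tokens_mask(
    token_ids_0: list[int],
    token_ids_1: Optional[list[int]] = None,
    already_has_special_tokens: bool = False,
) -> list[int]:
    if already_has_special_tokens:
        return special_token_mask(token_ids_0)
    if token_ids_1 is None:
        return special_token_mask(token_ids_0) + [1]
    return special_token_mask(token_ids_0 + token_ids_1) + [1]

print(get_special_tokens_mask([120, 0, 450]))                              # [0, 1, 0, 1]
print(get_special_tokens_mask([120, 1], already_has_special_tokens=True))  # [0, 1]
```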
4,459
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/configuration_pegasus_x.py
|
transformers.models.pegasus_x.configuration_pegasus_x.PegasusXConfig
|
from ...configuration_utils import PretrainedConfig
class PegasusXConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`PegasusXModel`]. It is used to instantiate a
PEGASUS-X model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the PEGASUS-X
[google/pegasus-x-large](https://huggingface.co/google/pegasus-x-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 96103):
Vocabulary size of the PEGASUS-X model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`PegasusXModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimension of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 16):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 16):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 1):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
num_global_tokens (`int`, *optional*, defaults to 32):
Number of global tokens to use for the encoder.
block_size (`int`, *optional*, defaults to 512):
Block size for encoder local attention. Sequence length should be an exact multiple of block size.
block_size must be a multiple of 2 if stagger_local_blocks is True.
stagger_local_blocks (`bool`, *optional*, defaults to `True`):
Whether to stagger every other local attention by half a block.
Example:
```python
>>> from transformers import PegasusXConfig, PegasusXModel
>>> # Initializing a PEGASUS google/pegasus-x-large style configuration
>>> configuration = PegasusXConfig()
>>> # Initializing a model (with random weights) from the google/pegasus-x-large style configuration
>>> model = PegasusXModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'pegasus_x'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self, vocab_size=96103, max_position_embeddings=16384, encoder_layers=16, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=16, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=True, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, num_global_tokens=32, block_size=512, stagger_local_blocks=True, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding
self.num_global_tokens = num_global_tokens
self.block_size = block_size
self.stagger_local_blocks = stagger_local_blocks
super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model
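A small illustrative sketch (not part of the original file) of how the local-attention settings documented above interact; the concrete numbers below are arbitrary assumptions.

```python
from transformers import PegasusXConfig

# With stagger_local_blocks=True the block size must be even, because staggered
# layers shift the local windows by half a block.
config = PegasusXConfig(block_size=256, stagger_local_blocks=True, num_global_tokens=64)

# attribute_map exposes the encoder settings under generic names.
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.encoder_attention_heads

# The encoder pads its input so the sequence length is a multiple of block_size.
seq_len = 1000
pad_len = -seq_len % config.block_size
print(pad_len)  # 24
```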
|
class PegasusXConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`PegasusXModel`]. It is used to instantiate a
PEGASUS-X model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the PEGASUS-X
[google/pegasus-x-large](https://huggingface.co/google/pegasus-x-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 96103):
Vocabulary size of the PEGASUS-X model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`PegasusXModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimension of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 16):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 16):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
forced_eos_token_id (`int`, *optional*, defaults to 1):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
num_global_tokens (`int`, *optional*, defaults to 32):
Number of global tokens to use for the encoder.
block_size (`int`, *optional*, defaults to 512):
Block size for encoder local attention. Sequence length should be an exact multiple of block size.
block_size must be a multiple of 2 if stagger_local_blocks is True.
stagger_local_blocks (`bool`, *optional*, defaults to `True`):
Whether to stagger every other local attention by half a block.
Example:
```python
>>> from transformers import PegasusXConfig, PegasusXModel
>>> # Initializing a PEGASUS google/pegasus-x-large style configuration
>>> configuration = PegasusXConfig()
>>> # Initializing a model (with random weights) from the google/pegasus-x-large style configuration
>>> model = PegasusXModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=96103, max_position_embeddings=16384, encoder_layers=16, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=16, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=True, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, num_global_tokens=32, block_size=512, stagger_local_blocks=True, **kwargs):
pass
@property
def num_attention_heads(self) -> int:
pass
@property
def hidden_size(self) -> int:
pass
| 6
| 1
| 22
| 1
| 21
| 0
| 1
| 0.97
| 1
| 2
| 0
| 0
| 3
| 22
| 3
| 3
| 151
| 14
| 70
| 60
| 35
| 68
| 32
| 29
| 28
| 1
| 1
| 0
| 3
|
4,460
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.DimensionInfo
|
from dataclasses import dataclass
@dataclass
class DimensionInfo:
"""Wrapper for dimension info."""
batch_size: int
seq_len: int
block_size: int
num_heads: int
hidden_dim: int
dim_per_head: int
num_blocks: int
global_len: int
padded_seq_len: int
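For illustration only (an assumption layered on the dataclass above), this is how the fields are typically derived from an encoder input that has already been padded to a multiple of `block_size`:

```python
import torch

hidden = torch.zeros(2, 1024, 768)               # (batch_size, padded_seq_len, hidden_dim)
block_size, num_heads, global_len = 512, 12, 32  # illustrative values

dim = DimensionInfo(
    batch_size=hidden.shape[0],
    seq_len=hidden.shape[1],
    block_size=block_size,
    num_heads=num_heads,
    hidden_dim=hidden.shape[2],
    dim_per_head=hidden.shape[2] // num_heads,
    num_blocks=hidden.shape[1] // block_size,
    global_len=global_len,
    padded_seq_len=hidden.shape[1],
)
print(dim.num_blocks, dim.dim_per_head)          # 2 64
```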
|
@dataclass
class DimensionInfo:
'''Wrapper for dimension info.'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 1
| 10
| 1
| 9
| 10
| 10
| 1
| 9
| 0
| 0
| 0
| 0
|
4,461
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXAttention
|
from ...utils.deprecation import deprecate_kwarg
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from torch import nn
from .configuration_pegasus_x import PegasusXConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
from ...modeling_flash_attention_utils import FlashAttentionKwargs
class PegasusXAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PegasusXConfig]=None, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights)
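A rough usage sketch (shapes and sizes are assumptions, not taken from the file): a freshly created config falls back to eager attention, so the module-level `eager_attention_forward` helper defined elsewhere in this file is the path exercised here.

```python
import torch
from transformers import PegasusXConfig

config = PegasusXConfig(d_model=64, encoder_attention_heads=4)
attn = PegasusXAttention(embed_dim=64, num_heads=4, dropout=0.0, config=config)

hidden_states = torch.randn(2, 10, 64)            # (batch, seq_len, embed_dim)
attn_output, attn_weights = attn(hidden_states, output_attentions=True)
print(attn_output.shape)                          # torch.Size([2, 10, 64])
```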
|
class PegasusXAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PegasusXConfig]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4
| 2
| 50
| 7
| 35
| 8
| 5
| 0.24
| 1
| 7
| 1
| 0
| 3
| 12
| 3
| 13
| 156
| 23
| 107
| 44
| 86
| 26
| 68
| 27
| 64
| 12
| 1
| 2
| 15
|
4,462
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXDecoder
|
from .configuration_pegasus_x import PegasusXConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
import torch
from torch import nn
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
import math
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
class PegasusXDecoder(PegasusXPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]
Args:
config: PegasusXConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding]=None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
padding_idx = config.pad_token_id
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = PegasusXScaledWordEmbedding(config.vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale)
self.embed_positions = PegasusXSinusoidalPositionalEmbedding(config.d_model)
self.layers = nn.ModuleList([PegasusXDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is
useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')
elif input_ids is not None:
input = input_ids
input_shape = input.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
batch_size, seq_length = inputs_embeds.size()[:-1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)
if attention_mask is None and (not is_torchdynamo_compiling()):
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache)
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)
position_ids = cache_position.unsqueeze(1)
position_ids = self.embed_positions(inputs_embeds, past_key_values_length, position_ids)
position_ids = position_ids.to(inputs_embeds.device)
hidden_states = inputs_embeds + position_ids
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
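A standalone sketch of the decoder with tiny, made-up sizes; in practice it is driven through `PegasusXModel` or `PegasusXForConditionalGeneration` rather than called directly.

```python
import torch
from transformers import PegasusXConfig

config = PegasusXConfig(d_model=32, decoder_layers=2, decoder_attention_heads=4,
                        decoder_ffn_dim=64, vocab_size=100)
decoder = PegasusXDecoder(config)

decoder_input_ids = torch.tensor([[0, 5, 7, 9]])
encoder_hidden_states = torch.randn(1, 16, 32)    # stand-in for encoder output

out = decoder(input_ids=decoder_input_ids,
              encoder_hidden_states=encoder_hidden_states,
              use_cache=False)
print(out.last_hidden_state.shape)                # torch.Size([1, 4, 32])
```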
| null | 3
| 2
| 53
| 8
| 32
| 14
| 9
| 0.47
| 1
| 10
| 5
| 0
| 4
| 8
| 4
| 5
| 225
| 37
| 128
| 40
| 111
| 60
| 74
| 28
| 69
| 30
| 2
| 3
| 35
|
4,463
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXDecoderLayer
|
from torch import nn
import torch
from ...modeling_layers import GradientCheckpointingLayer
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from .configuration_pegasus_x import PegasusXConfig
from ...activations import ACT2FN
from ...utils.deprecation import deprecate_kwarg
from typing import Callable, Optional, Union
class PegasusXDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: PegasusXConfig, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusXAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, config=config, layer_idx=layer_idx)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = PegasusXAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, config=config, layer_idx=layer_idx)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape *(seq_len, batch, embed_dim)*
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache: Whether to use KV cache for decoding
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
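A single-layer sketch with assumed sizes, showing the pre-norm ordering: LayerNorm, self-attention, residual, then cross-attention against the encoder states, then the feed-forward block.

```python
import torch
from transformers import PegasusXConfig

config = PegasusXConfig(d_model=32, decoder_attention_heads=4, decoder_ffn_dim=64)
layer = PegasusXDecoderLayer(config, layer_idx=0)

hidden_states = torch.randn(1, 4, 32)
encoder_hidden_states = torch.randn(1, 16, 32)
(hidden_states,) = layer(hidden_states, encoder_hidden_states=encoder_hidden_states)
print(hidden_states.shape)                        # torch.Size([1, 4, 32])
```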
|
class PegasusXDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: PegasusXConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape *(seq_len, batch, embed_dim)*
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache: Whether to use KV cache for decoding
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
'''
pass
| 4
| 1
| 54
| 6
| 38
| 11
| 4
| 0.29
| 1
| 5
| 2
| 0
| 2
| 11
| 2
| 12
| 110
| 12
| 76
| 30
| 64
| 22
| 44
| 21
| 41
| 6
| 1
| 1
| 7
|
4,464
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXDecoderWrapper
|
class PegasusXDecoderWrapper(PegasusXPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = PegasusXDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
|
class PegasusXDecoderWrapper(PegasusXPreTrainedModel):
'''
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
'''
def __init__(self, config):
pass
def forward(self, *args, **kwargs):
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 1
| 0.67
| 1
| 2
| 1
| 0
| 2
| 1
| 2
| 3
| 12
| 2
| 6
| 4
| 3
| 4
| 6
| 4
| 3
| 1
| 2
| 0
| 2
|
4,465
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXEncoder
|
import torch
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from torch import nn
import math
from .configuration_pegasus_x import PegasusXConfig
class PegasusXEncoder(PegasusXPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`PegasusXEncoderLayer`].
Args:
config: PegasusXConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding]=None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = PegasusXScaledWordEmbedding(config.vocab_size, embed_dim, padding_idx, embed_scale=embed_scale)
self.embed_global = nn.Embedding(config.num_global_tokens, embed_dim)
self.embed_positions = PegasusXSinusoidalPositionalEmbedding(embed_dim)
self.layers = nn.ModuleList([PegasusXEncoderLayer(stagger_blocks_this_layer=i % 2 == 1 and config.stagger_local_blocks, config=config) for i in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f'Setting `config.max_position_embeddings={new_num_position_embeddings}`...')
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model)
self.embed_positions.to(self.device)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
embed_pos = self.embed_positions(inputs_embeds)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
batch_size, seq_len, _ = hidden_states.shape
if attention_mask is None:
attention_mask = torch.ones(*input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device)
attention_mask = attention_mask.to(dtype=hidden_states.dtype)
mask_min_value = torch.finfo(hidden_states.dtype).min
inverted_mask = 1.0 - attention_mask
attention_mask = inverted_mask.masked_fill(inverted_mask.to(torch.bool), mask_min_value)
if seq_len % self.config.block_size != 0:
pad_len = self.config.block_size - seq_len % self.config.block_size
hidden_states = nn.functional.pad(hidden_states, pad=(0, 0, 0, pad_len), value=0)
attention_mask = nn.functional.pad(attention_mask, pad=(0, pad_len), value=mask_min_value)
global_hidden_states = self.embed_global(torch.arange(self.config.num_global_tokens, device=hidden_states.device)[None].expand(batch_size, -1))
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, global_hidden_states, attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
global_hidden_states = layer_outputs[1]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[2],)
hidden_states = hidden_states[:, :seq_len]
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + ((hidden_states, global_hidden_states),)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
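A standalone encoder sketch with small assumed sizes: a 100-token input is padded internally to the next multiple of `block_size`, global token embeddings are processed alongside it, and the padding is stripped before returning.

```python
import torch
from transformers import PegasusXConfig

config = PegasusXConfig(d_model=32, encoder_layers=2, encoder_attention_heads=4,
                        encoder_ffn_dim=64, vocab_size=100, block_size=64,
                        num_global_tokens=8)
encoder = PegasusXEncoder(config)

input_ids = torch.randint(1, 100, (1, 100))
out = encoder(input_ids=input_ids)
print(out.last_hidden_state.shape)                # torch.Size([1, 100, 32])
```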
|
class PegasusXEncoder(PegasusXPreTrainedModel):
'''
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`PegasusXEncoderLayer`].
Args:
config: PegasusXConfig
embed_tokens (nn.Embedding): output embedding
'''
def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding]=None):
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings matrix
'''
pass
def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 5
| 4
| 49
| 7
| 30
| 12
| 7
| 0.45
| 1
| 12
| 5
| 0
| 4
| 9
| 4
| 5
| 210
| 34
| 122
| 39
| 109
| 55
| 79
| 31
| 74
| 21
| 2
| 3
| 26
|
4,466
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXEncoderLayer
|
from torch import nn
import torch
from ...modeling_layers import GradientCheckpointingLayer
from .configuration_pegasus_x import PegasusXConfig
from ...activations import ACT2FN
class PegasusXEncoderLayer(GradientCheckpointingLayer):
def __init__(self, stagger_blocks_this_layer: bool, config: PegasusXConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusXGlobalLocalAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, block_size=config.block_size, dropout=config.attention_dropout)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.global_self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
self.stagger_blocks_this_layer = stagger_blocks_this_layer
self.block_size = config.block_size
def forward(self, hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
global_hidden_states (`torch.FloatTensor`): global token hidden states
*(seq_len, num_global_tokens, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
global_residual = global_hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
global_hidden_states = self.global_self_attn_layer_norm(global_hidden_states)
if self.stagger_blocks_this_layer:
hidden_states, attention_mask = self.pad_local_tokens(hidden_states=hidden_states, attention_mask=attention_mask, block_size=self.block_size)
hidden_states, global_hidden_states, attn_weights = self.self_attn(token_hidden_states=hidden_states, global_hidden_states=global_hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
if self.stagger_blocks_this_layer:
hidden_states = self.unpad_local_tokens(padded_hidden_states=hidden_states, block_size=self.block_size)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)
global_hidden_states = global_residual + global_hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
global_residual = global_hidden_states
global_hidden_states = self.final_layer_norm(global_hidden_states)
global_hidden_states = self.activation_fn(self.fc1(global_hidden_states))
global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.activation_dropout, training=self.training)
global_hidden_states = self.fc2(global_hidden_states)
global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)
global_hidden_states = global_residual + global_hidden_states
outputs = (hidden_states, global_hidden_states)
if output_attentions:
outputs += (attn_weights,)
return outputs
@classmethod
def pad_local_tokens(cls, hidden_states, attention_mask, block_size):
pad_size = block_size // 2
mask_min_value = torch.finfo(hidden_states.dtype).min
padded_hidden_states = torch.nn.functional.pad(hidden_states, pad=(0, 0, pad_size, pad_size))
padded_mask = torch.nn.functional.pad(attention_mask, pad=(pad_size, pad_size), value=mask_min_value)
return (padded_hidden_states, padded_mask)
@classmethod
def unpad_local_tokens(cls, padded_hidden_states, block_size):
pad_size = block_size // 2
return padded_hidden_states[:, pad_size:-pad_size, :]
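An illustrative round trip through the two staggering helpers above (tensor sizes are arbitrary): padding adds half a block of masked positions on each side, and unpadding removes them again.

```python
import torch

hidden = torch.randn(1, 8, 4)                     # (batch, seq_len, embed_dim)
mask = torch.zeros(1, 8)                          # additive mask: 0 = attend
padded_hidden, padded_mask = PegasusXEncoderLayer.pad_local_tokens(hidden, mask, block_size=4)
print(padded_hidden.shape, padded_mask.shape)     # torch.Size([1, 12, 4]) torch.Size([1, 12])

restored = PegasusXEncoderLayer.unpad_local_tokens(padded_hidden, block_size=4)
print(torch.equal(restored, hidden))              # True
```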
|
class PegasusXEncoderLayer(GradientCheckpointingLayer):
def __init__(self, stagger_blocks_this_layer: bool, config: PegasusXConfig):
pass
def forward(self, hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
global_hidden_states (`torch.FloatTensor`): global token hidden states
*(seq_len, num_global_tokens, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
@classmethod
def pad_local_tokens(cls, hidden_states, attention_mask, block_size):
pass
@classmethod
def unpad_local_tokens(cls, padded_hidden_states, block_size):
pass
| 7
| 1
| 27
| 3
| 21
| 4
| 2
| 0.18
| 1
| 5
| 2
| 0
| 2
| 12
| 4
| 14
| 113
| 13
| 85
| 34
| 72
| 15
| 56
| 26
| 51
| 4
| 1
| 1
| 7
|
4,467
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXForConditionalGeneration
|
import torch
from torch.nn import CrossEntropyLoss
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from .configuration_pegasus_x import PegasusXConfig
from ...generation import GenerationMixin
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from torch import nn
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
@auto_docstring(custom_intro='\n The PEGASUS-X for conditional generation (e.g. summarization).\n ')
class PegasusXForConditionalGeneration(PegasusXPreTrainedModel, GenerationMixin):
base_model_prefix = 'model'
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight']
def __init__(self, config: PegasusXConfig):
super().__init__(config)
self.model = PegasusXModel(config)
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqLMOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.')
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
lm_logits = self.lm_head(outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
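A typical generation sketch; the checkpoint name and generation settings are illustrative assumptions, not prescribed by this file.

```python
from transformers import AutoTokenizer, PegasusXForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-base")
model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base")

article = "PEGASUS-X extends PEGASUS with staggered block-local attention and global tokens ..."
inputs = tokenizer(article, return_tensors="pt")

summary_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```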
|
@auto_docstring(custom_intro='\n The PEGASUS-X for conditional generation (e.g. summarization).\n ')
class PegasusXForConditionalGeneration(PegasusXPreTrainedModel, GenerationMixin):
def __init__(self, config: PegasusXConfig):
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
def get_position_embeddings(self) -> tuple[nn.Embedding]:
'''
Returns the position embeddings matrix
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqLMOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
'''
pass
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
pass
| 10
| 3
| 12
| 1
| 9
| 2
| 2
| 0.24
| 2
| 8
| 3
| 0
| 9
| 2
| 10
| 11
| 136
| 19
| 94
| 39
| 64
| 23
| 47
| 22
| 36
| 8
| 2
| 2
| 18
|
4,468
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXGlobalLocalAttention
|
import torch
from typing import Callable, Optional, Union
from torch import nn
class PegasusXGlobalLocalAttention(nn.Module):
"""Global + Local attention. For use with Encoder only."""
def __init__(self, embed_dim: int, num_heads: int, block_size: int, dropout: float=0.0, is_decoder: bool=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.block_size = block_size
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=False)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(self, token_hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
dim = DimensionInfo(batch_size=token_hidden_states.shape[0], seq_len=token_hidden_states.shape[1], block_size=self.block_size, num_heads=self.num_heads, hidden_dim=token_hidden_states.shape[2], dim_per_head=self.head_dim, num_blocks=token_hidden_states.shape[1] // self.block_size, global_len=global_hidden_states.shape[1], padded_seq_len=token_hidden_states.shape[1])
local_q = self._shape(self.q_proj(token_hidden_states) * self.scaling, seq_len=dim.padded_seq_len, bsz=dim.batch_size)
local_k = self._shape(self.k_proj(token_hidden_states), seq_len=dim.padded_seq_len, bsz=dim.batch_size)
local_v = self._shape(self.v_proj(token_hidden_states), seq_len=dim.padded_seq_len, bsz=dim.batch_size)
global_q = self._shape(self.q_proj(global_hidden_states) * self.scaling, seq_len=dim.global_len, bsz=dim.batch_size)
global_k = self._shape(self.k_proj(global_hidden_states), seq_len=dim.global_len, bsz=dim.batch_size)
global_v = self._shape(self.v_proj(global_hidden_states), seq_len=dim.global_len, bsz=dim.batch_size)
global_attn_output, global_attn_probs = self.compute_global_attention_representations(global_q=global_q, global_k=global_k, global_v=global_v, local_k=local_k, local_v=local_v, mask=attention_mask, dim=dim)
local_attn_output, local_attn_probs = self.compute_local_attention_representations(global_k=global_k, global_v=global_v, local_q=local_q, local_k=local_k, local_v=local_v, mask=attention_mask, dim=dim)
global_attn_output = global_attn_output.transpose(1, 2).contiguous().view(dim.batch_size, dim.global_len, dim.hidden_dim)
global_attn_output = self.out_proj(global_attn_output)
local_attn_output = local_attn_output.permute(0, 2, 3, 1, 4).contiguous()
local_attn_output = local_attn_output.view(dim.batch_size, dim.padded_seq_len, dim.hidden_dim)
local_attn_output = self.out_proj(local_attn_output)
if output_attentions:
attn_probs = {'global': global_attn_probs, 'local': local_attn_probs}
else:
attn_probs = None
return (local_attn_output, global_attn_output, attn_probs)
def compute_global_attention_representations(self, global_q, global_k, global_v, local_k, local_v, mask, dim: DimensionInfo):
"""Compute attention representations for global tokens.
Global tokens will attend to both global tokens as well as all input sequence tokens. Because the input
sequence tokens are arranged in blocks for local attention, we unblock them and compute attention.
Args:
global_q (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
query vectors from global tokens
global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
key vectors from global tokens
global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
value vectors from global tokens
local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
key vectors from local tokens
local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
value vectors from local tokens
mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask
dim (DimensionInfo): DimensionInfo wrapper for dimensions
Returns:
output of shape `[batch_size, length, features]`, where length will be padded to a multiple of block_size
"""
global_and_local_k = torch.cat([global_k, local_k], dim=2)
global_and_local_v = torch.cat([global_v, local_v], dim=2)
extended_mask = nn.functional.pad(mask, pad=(dim.global_len, 0), value=0)
attn_weights = torch.einsum('BHGF,BHXF->BHGX', global_q, global_and_local_k)
attn_weights = attn_weights + extended_mask[:, None, None, :]
attn_probs = nn.functional.softmax(attn_weights, dim=-1)
attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
attn_output = torch.einsum('BHGX,BHXF->BHGF', attn_probs, global_and_local_v)
return (attn_output, attn_probs)
def compute_local_attention_representations(self, global_k, global_v, local_q, local_k, local_v, mask, dim: DimensionInfo):
"""Compute attention representations for local tokens.
Local tokens will attend to both global tokens as well as all other tokens within the same local block. Hence,
we need to tile and concatenate the global tokens to every local block
Args:
global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
key vectors from global tokens
global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
value vectors from global tokens
local_q (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
query vectors from local tokens
local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
key vectors from local tokens
local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
value vectors from local tokens
mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask
dim (DimensionInfo): DimensionInfo wrapper for dimensions
Returns:
output of shape `[batch_size, length, features]`, where length will be padded to a multiple of block_size
"""
blocked_local_q = local_q.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
blocked_local_k = local_k.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
blocked_local_v = local_v.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
extended_mask = nn.functional.pad(mask.view(dim.batch_size, dim.num_blocks, dim.block_size), pad=(dim.global_len, 0), value=0)
blocked_local2global = torch.einsum('BHNKF,BHGF->BHNKG', blocked_local_q, global_k)
blocked_local2local = torch.einsum('BHNKF,BHNXF->BHNKX', blocked_local_q, blocked_local_k)
attn_weights = torch.cat([blocked_local2global, blocked_local2local], dim=-1)
attn_weights = attn_weights + extended_mask[:, None, :, None, :]
attn_probs = nn.functional.softmax(attn_weights, dim=-1)
attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
local2global_attn_probs = attn_probs[:, :, :, :, :dim.global_len]
local2local_attn_probs = attn_probs[:, :, :, :, dim.global_len:]
local2global_attn_output = torch.einsum('BHNKG,BHGF->BHNKF', local2global_attn_probs, global_v)
local2local_attn_output = torch.einsum('BHNKX,BHNXF->BHNKF', local2local_attn_probs, blocked_local_v)
attn_output = local2global_attn_output + local2local_attn_output
return (attn_output, attn_probs)
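A minimal standalone sketch of the block-local attention pattern implemented above: each token attends to the tokens in its own block plus a handful of global tokens. The einsum strings mirror `compute_local_attention_representations`; all sizes below are made up for illustration, and the sketch omits masking, dropout, and the output projection.

```python
import torch

B, H, S, G, F = 2, 4, 16, 3, 8        # batch, heads, seq len, global tokens, dim per head
block = 4
n_blocks = S // block

local_q = torch.randn(B, H, S, F)
local_k = torch.randn(B, H, S, F)
local_v = torch.randn(B, H, S, F)
global_k = torch.randn(B, H, G, F)
global_v = torch.randn(B, H, G, F)

# Arrange the sequence into blocks: [B, H, n_blocks, block, F]
q = local_q.view(B, H, n_blocks, block, F)
k = local_k.view(B, H, n_blocks, block, F)
v = local_v.view(B, H, n_blocks, block, F)

# Scores against the global tokens and against the tokens of the same block only.
local2global = torch.einsum("BHNKF,BHGF->BHNKG", q, global_k)
local2local = torch.einsum("BHNKF,BHNXF->BHNKX", q, k)
probs = torch.softmax(torch.cat([local2global, local2local], dim=-1), dim=-1)

# Weighted sum over global values and same-block values, then recombine.
out = torch.einsum("BHNKG,BHGF->BHNKF", probs[..., :G], global_v)
out = out + torch.einsum("BHNKX,BHNXF->BHNKF", probs[..., G:], v)
print(out.shape)  # torch.Size([2, 4, 4, 4, 8]); the module reshapes this back to [B, S, H*F]
```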
|
class PegasusXGlobalLocalAttention(nn.Module):
'''Global + Local attention. For use with Encoder only.'''
def __init__(self, embed_dim: int, num_heads: int, block_size: int, dropout: float=0.0, is_decoder: bool=False):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
pass
def forward(self, token_hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
'''Input shape: Batch x Time x Channel'''
pass
def compute_global_attention_representations(self, global_q, global_k, global_v, local_k, local_v, mask, dim: DimensionInfo):
'''Compute attention representations for global tokens.
Global tokens will attend to both global tokens as well as all input sequence tokens. Because the input
sequence tokens are arranged in blocks for local attention, we unblock them and compute attention.
Args:
global_q (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
query vectors from global tokens
global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
key vectors from global tokens
global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
value vectors from global tokens
local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
key vectors from local tokens
local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
value vectors from local tokens
mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask
dim (DimensionInfo): DimensionInfo wrapper for dimensions
Returns:
output of shape `[batch_size, length, features]`, where length will be padded to a multiple of block_size
'''
pass
def compute_local_attention_representations(self, global_k, global_v, local_q, local_k, local_v, mask, dim: DimensionInfo):
'''Compute attention representations for local tokens.
Local tokens will attend to both global tokens as well as all other tokens within the same local block. Hence,
we need to tile and concatenate the global tokens to every local block
Args:
global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
key vectors from global tokens
global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
value vectors from global tokens
local_q (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
query vectors from local tokens
local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
key vectors from local tokens
local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
value vectors from local tokens
mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask
dim (DimensionInfo): DimensionInfo wrapper for dimensions
Returns:
output of shape `[batch_size, length, features]`, where length will be padded to a multiple of block_size
'''
pass
| 6
| 4
| 45
| 4
| 28
| 13
| 1
| 0.45
| 1
| 7
| 1
| 0
| 5
| 11
| 5
| 15
| 232
| 27
| 141
| 63
| 118
| 64
| 64
| 46
| 58
| 2
| 1
| 1
| 7
|
4,469
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXModel
|
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from torch import nn
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from .configuration_pegasus_x import PegasusXConfig
import math
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
@auto_docstring
class PegasusXModel(PegasusXPreTrainedModel):
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight']
def __init__(self, config: PegasusXConfig):
super().__init__(config)
vocab_size = config.vocab_size
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
padding_idx = config.pad_token_id
self.shared = PegasusXScaledWordEmbedding(vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale)
self.encoder = PegasusXEncoder(config, self.shared)
self.decoder = PegasusXDecoder(config, self.shared)
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.encoder.resize_position_embeddings(new_num_position_embeddings)
self.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqModelOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Example:
```python
>>> from transformers import AutoTokenizer, PegasusModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-x-large")
>>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
>>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 1024]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
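A hedged usage sketch of the `encoder_outputs` branch in `forward` above: the encoder is run once and its output reused across several decoder passes. It assumes network access; `google/pegasus-x-large` is the checkpoint already used in the docstring example.

```python
import torch
from transformers import AutoTokenizer, PegasusXModel

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large")
model = PegasusXModel.from_pretrained("google/pegasus-x-large")

enc_in = tokenizer("A very long source document ...", return_tensors="pt")
with torch.no_grad():
    encoder_outputs = model.get_encoder()(**enc_in)   # run the encoder once

for prompt in ["Summary:", "TL;DR:"]:
    dec_in = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        out = model(
            encoder_outputs=encoder_outputs,
            attention_mask=enc_in.attention_mask,
            decoder_input_ids=dec_in.input_ids,
        )
    print(out.last_hidden_state.shape)
```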
|
@auto_docstring
class PegasusXModel(PegasusXPreTrainedModel):
def __init__(self, config: PegasusXConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_encoder(self):
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
def get_position_embeddings(self) -> tuple[nn.Embedding]:
'''
Returns the position embeddings matrix
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, Seq2SeqModelOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Example:
```python
>>> from transformers import AutoTokenizer, PegasusModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-x-large")
>>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
>>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 1024]
```'''
pass
| 10
| 3
| 17
| 2
| 11
| 4
| 2
| 0.34
| 1
| 10
| 6
| 0
| 8
| 3
| 8
| 9
| 144
| 22
| 91
| 32
| 66
| 31
| 39
| 17
| 30
| 10
| 2
| 1
| 18
|
4,470
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXPreTrainedModel
|
import torch
from torch import nn
from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_pegasus_x import PegasusXConfig
@auto_docstring
class PegasusXPreTrainedModel(PreTrainedModel):
config: PegasusXConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['PegasusXEncoderLayer', 'PegasusXDecoderLayer']
_supports_flash_attn = True
_supports_sdpa = False
_supports_flex_attn = True
_can_compile_fullgraph = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache):
if self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
elif attention_mask is None:
attention_mask = make_flex_block_causal_mask(torch.ones(size=(input_tensor.shape[0], input_tensor.shape[1]), device=input_tensor.device))
return attention_mask
if 'flash' in self.config._attn_implementation:
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if 'flash' in self.config._attn_implementation:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
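Because `_prepare_4d_causal_attention_mask_with_cache_position` is a `@staticmethod`, it can be exercised directly on the class with toy tensors. A hedged sketch (all values below are made up for illustration):

```python
import torch

attn_2d = torch.tensor([[1, 1, 1, 0]])   # one padded position at the end
mask_4d = PegasusXPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask=attn_2d,
    sequence_length=4,
    target_length=4,
    dtype=torch.float32,
    cache_position=torch.arange(4),
    batch_size=1,
)
print(mask_4d.shape)                 # torch.Size([1, 1, 4, 4])
# Entries equal to 0 are attendable; everything else holds the dtype's most negative value.
print((mask_4d[0, 0] == 0).int())    # lower-triangular pattern with the padded column masked
```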
|
@auto_docstring
class PegasusXPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
'''
pass
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
pass
| 8
| 1
| 8
| 0
| 8
| 0
| 4
| 0
| 1
| 0
| 0
| 5
| 1
| 0
| 1
| 1
| 14
| 1
| 13
| 7
| 11
| 0
| 12
| 7
| 10
| 4
| 1
| 2
| 4
|
4,471
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXScaledWordEmbedding
|
import torch
from torch import nn
from typing import Callable, Optional, Union
class PegasusXScaledWordEmbedding(nn.Embedding):
"""
This module overrides nn.Embedding's forward by multiplying the output with the embedding scale.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.embed_scale = embed_scale
def forward(self, input_ids: torch.Tensor):
return super().forward(input_ids) * self.embed_scale
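A small usage sketch: the scale is typically `sqrt(d_model)` when `config.scale_embedding` is set (this is how `PegasusXModel` builds its shared embedding). The sizes below are toy values.

```python
import math
import torch

emb = PegasusXScaledWordEmbedding(num_embeddings=10, embedding_dim=4, padding_idx=0, embed_scale=math.sqrt(4))
ids = torch.tensor([[1, 2, 3]])
scaled = emb(ids)
# The output is just the plain embedding lookup multiplied by the scale (2.0 here).
print(torch.allclose(scaled, emb.weight[ids] * 2.0))  # True
```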
|
class PegasusXScaledWordEmbedding(nn.Embedding):
'''
This module overrides nn.Embedding's forward by multiplying the output with the embedding scale.
'''
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0):
pass
def forward(self, input_ids: torch.Tensor):
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 1
| 0.5
| 1
| 4
| 0
| 0
| 2
| 1
| 2
| 2
| 11
| 2
| 6
| 4
| 3
| 3
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
4,472
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/pegasus_x/modeling_pegasus_x.py
|
transformers.models.pegasus_x.modeling_pegasus_x.PegasusXSinusoidalPositionalEmbedding
|
import numpy as np
from torch import nn
from typing import Callable, Optional, Union
import torch
class PegasusXSinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, embed_dim, max_scale: int=10000.0):
super().__init__()
self.embed_dim = embed_dim
self.max_scale = max_scale
@torch.no_grad()
def forward(self, input_embeds: torch.Tensor, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
batch_size, seq_len = input_embeds.shape[:2]
if position_ids is None:
position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=input_embeds.device)[:, None]
pe = torch.zeros((seq_len, self.embed_dim), device=input_embeds.device, dtype=input_embeds.dtype)
half_d_feature = self.embed_dim // 2
div_term = torch.exp(torch.arange(half_d_feature, device=input_embeds.device, dtype=torch.int64).type_as(input_embeds) * -(np.log(float(self.max_scale)) / (half_d_feature - 1)))
pe[:, :half_d_feature] = torch.sin(position_ids * div_term)
pe[:, half_d_feature:] = torch.cos(position_ids * div_term)
return pe[None].expand(batch_size, -1, -1)
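A hedged usage sketch: the embeddings are computed on the fly from the input shape, so any sequence length works without resizing a weight matrix. Note that this variant places all sines in the first half of the feature dimension and all cosines in the second half (not interleaved). Sizes are toy values.

```python
import torch

pos_emb = PegasusXSinusoidalPositionalEmbedding(embed_dim=8)
dummy = torch.zeros(2, 5, 8)                    # [batch, seq_len, embed_dim]
pe = pos_emb(dummy)
print(pe.shape)                                 # torch.Size([2, 5, 8])
print(torch.allclose(pe[0, 0, :4], torch.zeros(4)))  # True: sin(0) == 0 at position 0
print(torch.allclose(pe[0, 0, 4:], torch.ones(4)))   # True: cos(0) == 1 at position 0
```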
|
class PegasusXSinusoidalPositionalEmbedding(nn.Module):
'''This module produces sinusoidal positional embeddings of any length.'''
def __init__(self, embed_dim, max_scale: int=10000.0):
pass
@torch.no_grad()
def forward(self, input_embeds: torch.Tensor, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
'''`input_embeds` is expected to have shape `[bsz, seq_len, embed_dim]`.'''
pass
| 4
| 2
| 10
| 0
| 9
| 1
| 1
| 0.1
| 1
| 4
| 0
| 0
| 2
| 2
| 2
| 12
| 24
| 2
| 20
| 11
| 16
| 2
| 14
| 10
| 11
| 1
| 1
| 0
| 2
|
4,473
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/configuration_perceiver.py
|
transformers.models.perceiver.configuration_perceiver.PerceiverConfig
|
from ...configuration_utils import PretrainedConfig
class PerceiverConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate a
Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Perceiver
[deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_latents (`int`, *optional*, defaults to 256):
The number of latents.
d_latents (`int`, *optional*, defaults to 1280):
Dimension of the latent embeddings.
d_model (`int`, *optional*, defaults to 768):
Dimension of the inputs. Should only be provided in case [*PerceiverTextPreprocessor*] is used or no
preprocessor is provided.
num_blocks (`int`, *optional*, defaults to 1):
Number of blocks in the Transformer encoder.
num_self_attends_per_block (`int`, *optional*, defaults to 26):
The number of self-attention layers per block.
num_self_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each self-attention layer in the Transformer encoder.
num_cross_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each cross-attention layer in the Transformer encoder.
qk_channels (`int`, *optional*):
Dimension to project the queries + keys before applying attention in the cross-attention and self-attention
layers of the encoder. Will default to preserving the dimension of the queries if not specified.
v_channels (`int`, *optional*):
Dimension to project the values before applying attention in the cross-attention and self-attention layers
of the encoder. Will default to preserving the dimension of the queries if not specified.
cross_attention_shape_for_attention (`str`, *optional*, defaults to `"kv"`):
Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
self_attention_widening_factor (`int`, *optional*, defaults to 1):
Widening factor of the feed-forward layer in the self-attention layers of the Transformer encoder.
cross_attention_widening_factor (`int`, *optional*, defaults to 1):
Widening factor of the feed-forward layer in the cross-attention layer of the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
use_query_residual (`bool`, *optional*, defaults to `True`):
Whether to add a query residual in the cross-attention layer of the encoder.
vocab_size (`int`, *optional*, defaults to 262):
Vocabulary size for the masked language modeling model.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that the masked language modeling model might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
image_size (`int`, *optional*, defaults to 56):
Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
train_size (`list[int]`, *optional*, defaults to `[368, 496]`):
Training size of the images for the optical flow model.
num_frames (`int`, *optional*, defaults to 16):
Number of video frames used for the multimodal autoencoding model.
audio_samples_per_frame (`int`, *optional*, defaults to 1920):
Number of audio samples per frame for the multimodal autoencoding model.
samples_per_patch (`int`, *optional*, defaults to 16):
Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
output_shape (`list[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal
autoencoding model. This excludes the channel dimension.
output_num_channels (`int`, *optional*, defaults to 512):
Number of output channels for each modality decoder.
Example:
```python
>>> from transformers import PerceiverModel, PerceiverConfig
>>> # Initializing a Perceiver deepmind/language-perceiver style configuration
>>> configuration = PerceiverConfig()
>>> # Initializing a model from the deepmind/language-perceiver style configuration
>>> model = PerceiverModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'perceiver'
def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention='kv', self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act='gelu', attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], output_num_channels=512, _label_trainable_num_channels=1024, **kwargs):
super().__init__(**kwargs)
self.num_latents = num_latents
self.d_latents = d_latents
self.d_model = d_model
self.num_blocks = num_blocks
self.num_self_attends_per_block = num_self_attends_per_block
self.num_self_attention_heads = num_self_attention_heads
self.num_cross_attention_heads = num_cross_attention_heads
self.qk_channels = qk_channels
self.v_channels = v_channels
self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
self.self_attention_widening_factor = self_attention_widening_factor
self.cross_attention_widening_factor = cross_attention_widening_factor
self.hidden_act = hidden_act
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_query_residual = use_query_residual
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.image_size = image_size
self.train_size = train_size
self.num_frames = num_frames
self.audio_samples_per_frame = audio_samples_per_frame
self.samples_per_patch = samples_per_patch
self.output_shape = output_shape
self.output_num_channels = output_num_channels
self._label_trainable_num_channels = _label_trainable_num_channels
|
class PerceiverConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate a
Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Perceiver
[deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_latents (`int`, *optional*, defaults to 256):
The number of latents.
d_latents (`int`, *optional*, defaults to 1280):
Dimension of the latent embeddings.
d_model (`int`, *optional*, defaults to 768):
Dimension of the inputs. Should only be provided in case [*PerceiverTextPreprocessor*] is used or no
preprocessor is provided.
num_blocks (`int`, *optional*, defaults to 1):
Number of blocks in the Transformer encoder.
num_self_attends_per_block (`int`, *optional*, defaults to 26):
The number of self-attention layers per block.
num_self_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each self-attention layer in the Transformer encoder.
num_cross_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each cross-attention layer in the Transformer encoder.
qk_channels (`int`, *optional*):
Dimension to project the queries + keys before applying attention in the cross-attention and self-attention
layers of the encoder. Will default to preserving the dimension of the queries if not specified.
v_channels (`int`, *optional*):
Dimension to project the values before applying attention in the cross-attention and self-attention layers
of the encoder. Will default to preserving the dimension of the queries if not specified.
cross_attention_shape_for_attention (`str`, *optional*, defaults to `"kv"`):
Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
self_attention_widening_factor (`int`, *optional*, defaults to 1):
Widening factor of the feed-forward layer in the self-attention layers of the Transformer encoder.
cross_attention_widening_factor (`int`, *optional*, defaults to 1):
Widening factor of the feed-forward layer in the cross-attention layer of the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
use_query_residual (`bool`, *optional*, defaults to `True`):
Whether to add a query residual in the cross-attention layer of the encoder.
vocab_size (`int`, *optional*, defaults to 262):
Vocabulary size for the masked language modeling model.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that the masked language modeling model might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
image_size (`int`, *optional*, defaults to 56):
Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
train_size (`list[int]`, *optional*, defaults to `[368, 496]`):
Training size of the images for the optical flow model.
num_frames (`int`, *optional*, defaults to 16):
Number of video frames used for the multimodal autoencoding model.
audio_samples_per_frame (`int`, *optional*, defaults to 1920):
Number of audio samples per frame for the multimodal autoencoding model.
samples_per_patch (`int`, *optional*, defaults to 16):
Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
output_shape (`list[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal
autoencoding model. This excludes the channel dimension.
output_num_channels (`int`, *optional*, defaults to 512):
Number of output channels for each modality decoder.
Example:
```python
>>> from transformers import PerceiverModel, PerceiverConfig
>>> # Initializing a Perceiver deepmind/language-perceiver style configuration
>>> configuration = PerceiverConfig()
>>> # Initializing a model from the deepmind/language-perceiver style configuration
>>> model = PerceiverModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention='kv', self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act='gelu', attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], output_num_channels=512, _label_trainable_num_channels=1024, **kwargs):
pass
| 2
| 1
| 64
| 1
| 59
| 4
| 1
| 1.31
| 1
| 1
| 0
| 0
| 1
| 27
| 1
| 1
| 151
| 10
| 61
| 60
| 29
| 80
| 31
| 30
| 29
| 1
| 1
| 0
| 1
|
4,474
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/configuration_perceiver.py
|
transformers.models.perceiver.configuration_perceiver.PerceiverOnnxConfig
|
from ...feature_extraction_utils import FeatureExtractionMixin
from collections import OrderedDict
from ...onnx.utils import compute_effective_axis_dimension
from ...onnx import OnnxConfig
from collections.abc import Mapping
from typing import Any, Union
from ...tokenization_utils_base import PreTrainedTokenizerBase
class PerceiverOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('inputs', dynamic_axis), ('attention_mask', dynamic_axis)])
@property
def atol_for_validation(self) -> float:
return 0.0001
def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40) -> Mapping[str, Any]:
if isinstance(preprocessor, PreTrainedTokenizerBase):
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
dummy_input = [' '.join(['a']) * seq_length] * batch_size
inputs = dict(preprocessor(dummy_input, return_tensors='pt'))
inputs['inputs'] = inputs.pop('input_ids')
return inputs
elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == 'pixel_values':
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
inputs = dict(preprocessor(images=dummy_input, return_tensors='pt'))
inputs['inputs'] = inputs.pop('pixel_values')
return inputs
else:
raise ValueError('Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.')
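A hedged usage sketch of `generate_dummy_inputs` with the tokenizer branch. It assumes the legacy `transformers.onnx` utilities are still available and that the `deepmind/language-perceiver` tokenizer can be downloaded.

```python
from transformers import AutoTokenizer, PerceiverConfig

config = PerceiverConfig()
onnx_config = PerceiverOnnxConfig(config)      # default task
print(onnx_config.inputs)                      # OrderedDict over "inputs" and "attention_mask"

tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=16)
print(sorted(dummy.keys()))                    # ['attention_mask', 'inputs']
print(dummy["inputs"].shape)                   # batch dimension of 2
```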
|
class PerceiverOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40) -> Mapping[str, Any]:
pass
| 6
| 0
| 18
| 0
| 16
| 2
| 2
| 0.1
| 1
| 10
| 2
| 0
| 3
| 0
| 3
| 3
| 58
| 3
| 50
| 21
| 33
| 5
| 23
| 8
| 19
| 3
| 1
| 1
| 6
|
4,475
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/feature_extraction_perceiver.py
|
transformers.models.perceiver.feature_extraction_perceiver.PerceiverFeatureExtractor
|
import warnings
from .image_processing_perceiver import PerceiverImageProcessor
from ...utils.import_utils import requires
@requires(backends=('vision',))
class PerceiverFeatureExtractor(PerceiverImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use PerceiverImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
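A tiny sketch showing that the deprecated alias still works but emits a `FutureWarning`; new code should instantiate `PerceiverImageProcessor` directly.

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = PerceiverFeatureExtractor()
# The shim only warns and then delegates to PerceiverImageProcessor.
print(caught[0].category is FutureWarning)  # True
```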
|
@requires(backends=('vision',))
class PerceiverFeatureExtractor(PerceiverImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 25
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|
4,476
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/image_processing_perceiver.py
|
transformers.models.perceiver.image_processing_perceiver.PerceiverImageProcessor
|
import numpy as np
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, resize, to_channel_dimension_format
from typing import Optional, Union
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...utils.import_utils import requires
@requires(backends=('vision',))
class PerceiverImageProcessor(BaseImageProcessor):
"""
Constructs a Perceiver image processor.
Args:
do_center_crop (`bool`, `optional`, defaults to `True`):
Whether or not to center crop the image. If the input size is smaller than `crop_size` along any edge, the
image will be padded with zeros and then center cropped. Can be overridden by the `do_center_crop`
parameter in the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Desired output size when applying center-cropping. Can be overridden by the `crop_size` parameter in the
`preprocess` method.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image to `(size["height"], size["width"])`. Can be overridden by the `do_resize`
parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by the `size` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
super().__init__(**kwargs)
crop_size = crop_size if crop_size is not None else {'height': 256, 'width': 256}
crop_size = get_size_dict(crop_size, param_name='crop_size')
size = size if size is not None else {'height': 224, 'width': 224}
size = get_size_dict(size)
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def center_crop(self, image: np.ndarray, crop_size: dict[str, int], size: Optional[int]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Center crop an image to `(size["height"] / crop_size["height"] * min_dim, size["width"] / crop_size["width"] *
min_dim)`, where `min_dim = min(height, width)` is computed from the input image.
If the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then
center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
crop_size (`dict[str, int]`):
Desired output size after applying the center crop.
size (`dict[str, int]`, *optional*):
Size of the image after resizing. If not provided, the self.size attribute will be used.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = self.size if size is None else size
size = get_size_dict(size)
crop_size = get_size_dict(crop_size, param_name='crop_size')
height, width = get_image_size(image, channel_dim=input_data_format)
min_dim = min(height, width)
cropped_height = size['height'] / crop_size['height'] * min_dim
cropped_width = size['width'] / crop_size['width'] * min_dim
return center_crop(image, size=(cropped_height, cropped_width), data_format=data_format, input_data_format=input_data_format, **kwargs)
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image to `crop_size`.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Desired output size after applying the center crop.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size')
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_center_crop:
images = [self.center_crop(image, crop_size, size=size, input_data_format=input_data_format) for image in images]
if do_resize:
images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
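As a quick illustration of the pipeline above, the following sketch runs the processor on a dummy image with its default settings (center crop to 256, resize to 224, rescale, normalize); it assumes `PerceiverImageProcessor` is importable from `transformers`, and the shapes in the comments follow from those defaults.
```python
import numpy as np
from transformers import PerceiverImageProcessor

processor = PerceiverImageProcessor()  # defaults: center crop 256, resize 224, rescale 1/255, normalize

# Dummy HWC uint8 image standing in for a real photo.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])
```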
|
@requires(backends=('vision',))
class PerceiverImageProcessor(BaseImageProcessor):
'''
Constructs a Perceiver image processor.
Args:
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether or not to center crop the image. If the input size is smaller than `crop_size` along any edge, the
image will be padded with zeros and then center cropped. Can be overridden by the `do_center_crop`
parameter in the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Desired output size when applying center-cropping. Can be overridden by the `crop_size` parameter in the
`preprocess` method.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image to `(size["height"], size["width"])`. Can be overridden by the `do_resize`
parameter in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by the `size` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
'''
def __init__(self, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
pass
def center_crop(self, image: np.ndarray, crop_size: dict[str, int], size: Optional[int]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Center crop an image to `(size["height"] / crop_size["height"] * min_dim, size["width"] / crop_size["width"] *
min_dim)`, where `min_dim = min(size["height"], size["width"])`.
If the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then
center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
crop_size (`dict[str, int]`):
Desired output size after applying the center crop.
size (`dict[str, int]`, *optional*):
Size of the image after resizing. If not provided, the self.size attribute will be used.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single image or a batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image to `crop_size`.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Desired output size after applying the center crop.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 7
| 4
| 65
| 5
| 38
| 22
| 7
| 0.79
| 1
| 8
| 2
| 1
| 4
| 10
| 4
| 24
| 302
| 24
| 155
| 68
| 104
| 123
| 66
| 22
| 61
| 18
| 3
| 1
| 27
|
4,477
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.AbstractPreprocessor
|
from torch import nn
class AbstractPreprocessor(nn.Module):
@property
def num_channels(self) -> int:
"""Returns size of preprocessor output."""
raise NotImplementedError()
|
class AbstractPreprocessor(nn.Module):
@property
def num_channels(self) -> int:
'''Returns size of preprocessor output.'''
pass
| 3
| 1
| 3
| 0
| 2
| 1
| 1
| 0.25
| 1
| 2
| 0
| 5
| 1
| 0
| 1
| 11
| 5
| 0
| 4
| 3
| 1
| 1
| 3
| 2
| 1
| 1
| 1
| 0
| 1
|
4,478
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.Conv2DDownsample
|
import torch
from torch import nn
class Conv2DDownsample(nn.Module):
"""Downsamples 4x by applying a 2D convolution and doing max pooling."""
def __init__(self, num_layers: int=1, in_channels: int=3, out_channels: int=64, use_batchnorm: bool=True):
"""
Constructs a Conv2DDownsample model.
Args:
in_channels (`int`, *optional*, defaults to 3):
The number of input channels.
out_channels (`int`, *optional*, defaults to 64):
The number of conv output channels.
use_batchnorm (`bool`, *optional*, defaults to `True`):
Whether to use batchnorm.
"""
super().__init__()
self.conv = Conv2dSamePadding(in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False)
self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()
self.relu = nn.ReLU()
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
out = self.conv(inputs)
out = self.batchnorm(out)
out = self.relu(out)
out = self.max_pool(out)
return out
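A minimal shape check for this block, assuming it is importable from the Perceiver modeling module (it relies on the `Conv2dSamePadding` helper defined further below in this file):
```python
import torch
from transformers.models.perceiver.modeling_perceiver import Conv2DDownsample

block = Conv2DDownsample(in_channels=3, out_channels=64, use_batchnorm=True)
x = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
out = block(x)
# "same"-padded 7x7 conv, stride 2: 224 -> 112; then 3x3 max pool, stride 2: 112 -> 55
print(out.shape)  # expected: torch.Size([1, 64, 55, 55])
```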
|
class Conv2DDownsample(nn.Module):
'''Downsamples 4x by applying a 2D convolution and doing max pooling.'''
def __init__(self, num_layers: int=1, in_channels: int=3, out_channels: int=64, use_batchnorm: bool=True):
'''
Constructs a Conv2DDownsample model.
Args:
in_channels (`int`, *optional*, defaults to 3):
The number of input channels.
out_channels (`int`, *optional*, defaults to 64):
The number of conv output channels.
use_batchnorm (`bool`, *optional*, defaults to `True`):
Whether to use batchnorm.
'''
pass
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
pass
| 3
| 2
| 16
| 1
| 10
| 5
| 2
| 0.52
| 1
| 5
| 1
| 0
| 2
| 4
| 2
| 12
| 36
| 4
| 21
| 14
| 12
| 11
| 13
| 8
| 10
| 2
| 1
| 0
| 3
|
4,479
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.Conv2dSamePadding
|
from functools import reduce
from torch import nn
from operator import __add__
class Conv2dSamePadding(nn.Conv2d):
"""
Conv2d layer with padding="same" support. Source:
https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.zero_pad_2d = nn.ZeroPad2d(reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]]))
def forward(self, input):
return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias)
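A small sketch of the padding behaviour: with stride 1 the asymmetric zero padding reproduces TensorFlow-style `padding="same"`, so spatial dimensions are preserved. The import path is an assumption based on the file this class lives in.
```python
import torch
from transformers.models.perceiver.modeling_perceiver import Conv2dSamePadding

conv = Conv2dSamePadding(in_channels=3, out_channels=8, kernel_size=3, stride=1)
x = torch.randn(2, 3, 32, 32)
print(conv(x).shape)  # expected: torch.Size([2, 8, 32, 32]) -- height/width unchanged

# For a 7x7 kernel the computed padding is 3 pixels on every side.
conv7 = Conv2dSamePadding(in_channels=3, out_channels=8, kernel_size=7, stride=1)
print(conv7.zero_pad_2d.padding)  # expected: (3, 3, 3, 3)
```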
|
class Conv2dSamePadding(nn.Conv2d):
'''
Conv2d layer with padding="same" support. Source:
https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6
'''
def __init__(self, *args, **kwargs):
pass
def forward(self, input):
pass
| 3
| 1
| 4
| 0
| 4
| 0
| 1
| 0.5
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 2
| 14
| 2
| 8
| 4
| 5
| 4
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
4,480
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverAbstractDecoder
|
import abc
from torch import nn
class PerceiverAbstractDecoder(nn.Module, metaclass=abc.ABCMeta):
"""Perceiver abstract decoder."""
@abc.abstractmethod
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
raise NotImplementedError
@property
@abc.abstractmethod
def num_query_channels(self):
raise NotImplementedError
@abc.abstractmethod
def forward(self, query, z, query_mask=None):
raise NotImplementedError
|
class PerceiverAbstractDecoder(nn.Module, metaclass=abc.ABCMeta):
'''Perceiver abstract decoder.'''
@abc.abstractmethod
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
pass
@property
@abc.abstractmethod
def num_query_channels(self):
pass
@abc.abstractmethod
def forward(self, query, z, query_mask=None):
pass
| 8
| 1
| 2
| 0
| 2
| 0
| 1
| 0.09
| 2
| 1
| 0
| 6
| 3
| 0
| 3
| 33
| 15
| 3
| 11
| 7
| 3
| 1
| 7
| 4
| 3
| 1
| 3
| 0
| 3
|
4,481
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverAbstractPositionEncoding
|
import abc
from torch import nn
class PerceiverAbstractPositionEncoding(nn.Module, metaclass=abc.ABCMeta):
"""Perceiver abstract position encoding."""
@property
@abc.abstractmethod
def num_dimensions(self) -> int:
raise NotImplementedError
@abc.abstractmethod
def output_size(self, *args, **kwargs) -> int:
raise NotImplementedError
@abc.abstractmethod
def forward(self, batch_size, pos):
raise NotImplementedError
|
class PerceiverAbstractPositionEncoding(nn.Module, metaclass=abc.ABCMeta):
'''Perceiver abstract position encoding.'''
@property
@abc.abstractmethod
def num_dimensions(self) -> int:
pass
@abc.abstractmethod
def output_size(self, *args, **kwargs) -> int:
pass
@abc.abstractmethod
def forward(self, batch_size, pos):
pass
| 8
| 1
| 2
| 0
| 2
| 0
| 1
| 0.09
| 2
| 2
| 0
| 2
| 3
| 0
| 3
| 33
| 15
| 3
| 11
| 7
| 3
| 1
| 7
| 4
| 3
| 1
| 3
| 0
| 3
|
4,482
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverAttention
|
from typing import Any, Callable, Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
import torch
from torch import nn
class PerceiverAttention(nn.Module):
"""Attention module, including a dense block."""
def __init__(self, config, is_cross_attention=False, qk_channels=None, v_channels=None, num_heads=1, q_dim=None, kv_dim=None, use_query_residual=True):
super().__init__()
if is_cross_attention and qk_channels is None:
if config.cross_attention_shape_for_attention == 'q':
qk_channels = q_dim
elif config.cross_attention_shape_for_attention == 'kv':
qk_channels = kv_dim
else:
raise ValueError(f'Unknown value {config.cross_attention_shape_for_attention} for cross_attention_shape_for_attention.')
else:
if qk_channels is None:
qk_channels = q_dim
if v_channels is None:
v_channels = qk_channels
self.self = PerceiverSelfAttention(config, is_cross_attention=is_cross_attention, qk_channels=qk_channels, v_channels=v_channels, num_heads=num_heads, q_dim=q_dim, kv_dim=kv_dim)
output_channels = None
if is_cross_attention:
output_channels = q_dim
elif output_channels is None:
output_channels = v_channels
self.output = PerceiverSelfOutput(config, input_channels=self.self.v_channels, output_channels=output_channels)
self.use_query_residual = use_query_residual
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs: Optional[torch.FloatTensor]=None, inputs_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, head_mask, inputs, inputs_mask, output_attentions)
attention_output = self.output(self_outputs[0])
if self.use_query_residual:
attention_output = attention_output + hidden_states
outputs = (attention_output,) + self_outputs[1:]
return outputs
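A rough sketch of the module wired as the encoder's cross-attention (latents as queries, preprocessed inputs as keys/values), assuming default `PerceiverConfig` values; in the library this is normally instantiated inside `PerceiverLayer` rather than used directly.
```python
import torch
from transformers import PerceiverConfig
from transformers.models.perceiver.modeling_perceiver import PerceiverAttention

config = PerceiverConfig()  # defaults: d_latents=1280, d_model=768, num_latents=256

cross_attention = PerceiverAttention(
    config,
    is_cross_attention=True,
    num_heads=config.num_cross_attention_heads,
    q_dim=config.d_latents,
    kv_dim=config.d_model,
    use_query_residual=True,
)

latents = torch.randn(2, config.num_latents, config.d_latents)  # queries
inputs = torch.randn(2, 128, config.d_model)                    # keys/values

attention_output = cross_attention(latents, inputs=inputs)[0]
print(attention_output.shape)  # expected: torch.Size([2, 256, 1280])
```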
|
class PerceiverAttention(nn.Module):
'''Attention module, including a dense block.'''
def __init__(self, config, is_cross_attention=False, qk_channels=None, v_channels=None, num_heads=1, q_dim=None, kv_dim=None, use_query_residual=True):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs: Optional[torch.FloatTensor]=None, inputs_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4
| 1
| 31
| 2
| 27
| 3
| 4
| 0.12
| 1
| 7
| 2
| 0
| 3
| 4
| 3
| 13
| 98
| 8
| 81
| 31
| 59
| 10
| 39
| 13
| 35
| 8
| 1
| 2
| 12
|
4,483
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor
|
import torch
from torch import nn
from .configuration_perceiver import PerceiverConfig
from typing import Any, Callable, Optional, Union
class PerceiverAudioPostprocessor(nn.Module):
"""
Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features.
Args:
config ([*PerceiverConfig*]):
Model configuration.
in_channels (`int`):
Number of channels in the input.
postproc_type (`str`, *optional*, defaults to `"patches"`):
Postprocessor type to use. Currently, only "patches" is supported.
"""
def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str='patches') -> None:
super().__init__()
if postproc_type not in ('patches',):
raise ValueError('Invalid postproc_type!')
self.classifier = nn.Linear(in_channels, config.samples_per_patch)
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor:
logits = self.classifier(inputs)
return torch.reshape(logits, [inputs.shape[0], -1])
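A small sketch of the reshaping this head performs; a `SimpleNamespace` stands in for `PerceiverConfig` since only `samples_per_patch` is read, and the channel count below is illustrative.
```python
import torch
from types import SimpleNamespace
from transformers.models.perceiver.modeling_perceiver import PerceiverAudioPostprocessor

config = SimpleNamespace(samples_per_patch=96)       # stand-in for PerceiverConfig
postprocessor = PerceiverAudioPostprocessor(config, in_channels=512)

decoder_output = torch.randn(2, 20, 512)             # (batch, num_patches, channels)
audio = postprocessor(decoder_output)
print(audio.shape)  # expected: torch.Size([2, 1920]) -- 20 patches * 96 samples each
```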
|
class PerceiverAudioPostprocessor(nn.Module):
'''
Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features.
Args:
config ([*PerceiverConfig*]):
Model configuration.
in_channels (`int`):
Number of channels in the input.
postproc_type (`str`, *optional*, defaults to `"patches"`):
Postprocessor type to use. Currently, only "patches" is supported.
'''
def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str='patches') -> None:
pass
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor:
pass
| 3
| 1
| 6
| 1
| 4
| 1
| 2
| 1.33
| 1
| 6
| 1
| 0
| 2
| 1
| 2
| 12
| 25
| 5
| 9
| 5
| 6
| 12
| 9
| 5
| 6
| 2
| 1
| 1
| 3
|
4,484
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor
|
import torch
from typing import Any, Callable, Optional, Union
class PerceiverAudioPreprocessor(AbstractPreprocessor):
"""
Audio preprocessing for Perceiver Encoder.
Args:
config ([*PerceiverConfig*]):
Model configuration.
prep_type (`str`, *optional*, defaults to `"patches"`):
Preprocessor type to use. Only "patches" is supported.
samples_per_patch (`int`, *optional*, defaults to 96):
Number of samples per patch.
position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
Type of position encoding to use. Can be "trainable" or "fourier".
concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
How to concatenate the position encoding to the input. Can be "concat" or "add".
out_channels (`int`, *optional*, defaults to 64):
Number of channels in the output.
project_pos_dim (`int`, *optional*, defaults to -1):
Dimension of the position encoding to project to. If -1, no projection is applied.
**position_encoding_kwargs (`Dict`, *optional*):
Keyword arguments for the position encoding.
"""
def __init__(self, config, prep_type: str='patches', samples_per_patch: int=96, position_encoding_type: str='fourier', concat_or_add_pos: str='concat', out_channels=64, project_pos_dim=-1, **position_encoding_kwargs):
super().__init__()
self.config = config
if prep_type not in ('patches',):
raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.")
if concat_or_add_pos not in ['concat', 'add']:
raise ValueError(f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.")
self.samples_per_patch = samples_per_patch
self.position_encoding_type = position_encoding_type
self.concat_or_add_pos = concat_or_add_pos
self.project_pos_dim = project_pos_dim
self.position_embeddings, self.positions_projection = build_position_encoding(position_encoding_type=position_encoding_type, out_channels=out_channels, project_pos_dim=project_pos_dim, **position_encoding_kwargs)
@property
def num_channels(self) -> int:
if self.project_pos_dim > 0:
pos_dim = self.project_pos_dim
else:
pos_dim = self.position_embeddings.output_size()
if self.concat_or_add_pos == 'add':
return pos_dim
return self.samples_per_patch + pos_dim
def _build_network_inputs(self, inputs):
"""Construct the final input, including position encoding."""
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
if self.position_encoding_type == 'trainable':
pos_enc = self.position_embeddings(batch_size)
elif self.position_encoding_type == 'fourier':
pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)
pos_enc = self.positions_projection(pos_enc)
if self.concat_or_add_pos == 'concat':
inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
elif self.concat_or_add_pos == 'add':
inputs_with_pos = inputs + pos_enc
return (inputs_with_pos, inputs)
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False):
inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch])
inputs, inputs_without_pos = self._build_network_inputs(inputs)
modality_sizes = None
return (inputs, modality_sizes, inputs_without_pos)
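A rough construction sketch; the Fourier keyword layout (`num_bands`, `max_resolution`, `sine_only`, `concat_pos`) mirrors the kwargs used for the image preprocessors later in this file, and the concrete values below are illustrative assumptions rather than recommended settings.
```python
import torch
from transformers import PerceiverConfig
from transformers.models.perceiver.modeling_perceiver import PerceiverAudioPreprocessor

config = PerceiverConfig()
preprocessor = PerceiverAudioPreprocessor(
    config,
    prep_type="patches",
    samples_per_patch=96,
    position_encoding_type="fourier",
    concat_or_add_pos="concat",
    # max_resolution is a 1-tuple because audio patches form a 1-D index grid.
    fourier_position_encoding_kwargs={"num_bands": 192, "max_resolution": (1920,), "sine_only": False, "concat_pos": True},
)

audio = torch.randn(2, 1920, 1)  # (batch, num_samples, 1); 1920 samples = 20 patches of 96
inputs, modality_sizes, inputs_without_pos = preprocessor(audio)
print(inputs.shape)  # (2, 20, preprocessor.num_channels): patches concatenated with Fourier positions
```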
|
class PerceiverAudioPreprocessor(AbstractPreprocessor):
'''
Audio preprocessing for Perceiver Encoder.
Args:
config ([*PerceiverConfig*]):
Model configuration.
prep_type (`str`, *optional*, defaults to `"patches"`):
Preprocessor type to use. Only "patches" is supported.
samples_per_patch (`int`, *optional*, defaults to 96):
Number of samples per patch.
position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
Type of position encoding to use. Can be "trainable" or "fourier".
concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
How to concatenate the position encoding to the input. Can be "concat" or "add".
out_channels (`int`, *optional*, defaults to 64):
Number of channels in the output.
project_pos_dim (`int`, *optional*, defaults to -1):
Dimension of the position encoding to project to. If -1, no projection is applied.
**position_encoding_kwargs (`Dict`, *optional*):
Keyword arguments for the position encoding.
'''
def __init__(self, config, prep_type: str='patches', samples_per_patch: int=96, position_encoding_type: str='fourier', concat_or_add_pos: str='concat', out_channels=64, project_pos_dim=-1, **position_encoding_kwargs):
pass
@property
def num_channels(self) -> int:
pass
def _build_network_inputs(self, inputs):
'''Construct the final input, including position encoding.'''
pass
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False):
pass
| 6
| 2
| 19
| 3
| 15
| 2
| 3
| 0.43
| 1
| 6
| 0
| 0
| 4
| 7
| 4
| 15
| 101
| 15
| 61
| 35
| 39
| 26
| 36
| 18
| 31
| 5
| 2
| 1
| 12
|
4,485
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder
|
from typing import Any, Callable, Optional, Union
from .configuration_perceiver import PerceiverConfig
from torch import nn
import torch
import numpy as np
class PerceiverBasicDecoder(PerceiverAbstractDecoder):
"""
Cross-attention-based decoder. This class can be used to decode the final hidden states of the latents using a
cross-attention operation, in which the latents produce keys and values.
The shape of the output of this class depends on how one defines the output queries (also called decoder queries).
Args:
config ([*PerceiverConfig*]):
Model configuration.
output_num_channels (`int`, *optional*):
The number of channels in the output. Will only be used in case *final_project* is set to `True`.
position_encoding_type (`str`, *optional*, defaults to "trainable"):
The type of position encoding to use. Can be either "trainable", "fourier", or "none".
output_index_dims (`int`, *optional*):
The number of dimensions of the output queries. Ignored if 'position_encoding_type' == 'none'.
num_channels (`int`, *optional*, defaults to 128):
The number of channels of the decoder queries. Ignored if 'position_encoding_type' == 'none'.
qk_channels (`int`, *optional*):
The number of channels of the queries and keys in the cross-attention layer.
v_channels (`int`, *optional*):
The number of channels of the values in the cross-attention layer.
num_heads (`int`, *optional*, defaults to 1):
The number of attention heads in the cross-attention layer.
widening_factor (`int`, *optional*, defaults to 1):
The widening factor of the cross-attention layer.
use_query_residual (`bool`, *optional*, defaults to `False`):
Whether to use a residual connection between the query and the output of the cross-attention layer.
concat_preprocessed_input (`bool`, *optional*, defaults to `False`):
Whether to concatenate the preprocessed input to the query.
final_project (`bool`, *optional*, defaults to `True`):
Whether to project the output of the cross-attention layer to a target dimension.
position_encoding_only (`bool`, *optional*, defaults to `False`):
Whether to only use this class to define output queries.
"""
def __init__(self, config: PerceiverConfig, output_num_channels: int, position_encoding_type: Optional[str]='trainable', output_index_dims: Optional[int]=None, num_channels: Optional[int]=128, subsampled_index_dims: Optional[int]=None, qk_channels: Optional[int]=None, v_channels: Optional[int]=None, num_heads: Optional[int]=1, widening_factor: Optional[int]=1, use_query_residual: Optional[bool]=False, concat_preprocessed_input: Optional[bool]=False, final_project: Optional[bool]=True, position_encoding_only: Optional[bool]=False, **position_encoding_kwargs) -> None:
super().__init__()
self.output_num_channels = output_num_channels
self.output_position_encodings = None
self.position_encoding_type = position_encoding_type
self.position_encoding_kwargs = position_encoding_kwargs
if position_encoding_type != 'none':
self.output_position_encodings, self.positions_projection = build_position_encoding(position_encoding_type=position_encoding_type, **position_encoding_kwargs)
self.output_index_dims = output_index_dims
self.num_channels = num_channels
if subsampled_index_dims is None:
subsampled_index_dims = output_index_dims
self.subsampled_index_dims = subsampled_index_dims
self.concat_preprocessed_input = concat_preprocessed_input
self.final_project = final_project
self.position_encoding_only = position_encoding_only
if not self.position_encoding_only:
self.decoding_cross_attention = PerceiverLayer(config, is_cross_attention=True, qk_channels=qk_channels, v_channels=v_channels, num_heads=num_heads, q_dim=num_channels, kv_dim=config.d_latents, widening_factor=widening_factor, use_query_residual=use_query_residual)
self.final_layer = nn.Linear(num_channels, output_num_channels) if final_project else nn.Identity()
@property
def num_query_channels(self) -> int:
if self.position_encoding_type == 'none':
raise ValueError('You cannot calculate number of decoder query channels when position_encoding_type is set to none')
if self.position_encoding_only:
if 'project_pos_dim' in self.position_encoding_kwargs:
return self.position_encoding_kwargs['project_pos_dim']
return self.output_position_encodings.output_size()
if self.final_project:
return self.output_num_channels
return self.num_channels
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
if self.position_encoding_type == 'none':
raise ValueError('You cannot construct decoder queries when position_encoding_type is set to none')
if subsampled_points is not None:
indices = [torch.from_numpy(x) for x in np.unravel_index(subsampled_points.cpu(), self.output_index_dims)]
pos = torch.stack(indices, dim=1)
batch_size = inputs.shape[0]
pos = -1 + 2 * pos / torch.tensor(self.output_index_dims)[None, :]
pos = torch.broadcast_to(pos[None], [batch_size, pos.shape[0], pos.shape[1]])
if self.position_encoding_type == 'trainable':
pos_emb = self.output_position_encodings(batch_size)
elif self.position_encoding_type == 'fourier':
pos_emb = self.output_position_encodings(self.output_index_dims, batch_size=batch_size, device=inputs.device, dtype=inputs.dtype, pos=pos)
pos_emb = self.positions_projection(pos_emb)
pos_emb = torch.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]])
else:
batch_size = inputs.shape[0]
index_dims = inputs.shape[2:]
if self.position_encoding_type == 'trainable':
pos_emb = self.output_position_encodings(batch_size)
elif self.position_encoding_type == 'fourier':
pos_emb = self.output_position_encodings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)
pos_emb = self.positions_projection(pos_emb)
if self.concat_preprocessed_input:
if inputs_without_pos is None:
raise ValueError('Value is required for inputs_without_pos if concat_preprocessed_input is True')
pos_emb = torch.cat([inputs_without_pos, pos_emb], dim=-1)
return pos_emb
def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> PerceiverDecoderOutput:
cross_attentions = () if output_attentions else None
layer_outputs = self.decoding_cross_attention(query, attention_mask=query_mask, head_mask=None, inputs=z, inputs_mask=None, output_attentions=output_attentions)
output = layer_outputs[0]
if output_attentions:
cross_attentions = cross_attentions + (layer_outputs[1],)
logits = self.final_layer(output)
return PerceiverDecoderOutput(logits=logits, cross_attentions=cross_attentions)
|
class PerceiverBasicDecoder(PerceiverAbstractDecoder):
'''
Cross-attention-based decoder. This class can be used to decode the final hidden states of the latents using a
cross-attention operation, in which the latents produce keys and values.
The shape of the output of this class depends on how one defines the output queries (also called decoder queries).
Args:
config ([*PerceiverConfig*]):
Model configuration.
output_num_channels (`int`, *optional*):
The number of channels in the output. Will only be used in case *final_project* is set to `True`.
position_encoding_type (`str`, *optional*, defaults to "trainable"):
The type of position encoding to use. Can be either "trainable", "fourier", or "none".
output_index_dims (`int`, *optional*):
The number of dimensions of the output queries. Ignored if 'position_encoding_type' == 'none'.
num_channels (`int`, *optional*, defaults to 128):
The number of channels of the decoder queries. Ignored if 'position_encoding_type' == 'none'.
qk_channels (`int`, *optional*):
The number of channels of the queries and keys in the cross-attention layer.
v_channels (`int`, *optional*):
The number of channels of the values in the cross-attention layer.
num_heads (`int`, *optional*, defaults to 1):
The number of attention heads in the cross-attention layer.
widening_factor (`int`, *optional*, defaults to 1):
The widening factor of the cross-attention layer.
use_query_residual (`bool`, *optional*, defaults to `False`):
Whether to use a residual connection between the query and the output of the cross-attention layer.
concat_preprocessed_input (`bool`, *optional*, defaults to `False`):
Whether to concatenate the preprocessed input to the query.
final_project (`bool`, *optional*, defaults to `True`):
Whether to project the output of the cross-attention layer to a target dimension.
position_encoding_only (`bool`, *optional*, defaults to `False`):
Whether to only use this class to define output queries.
'''
def __init__(self, config: PerceiverConfig, output_num_channels: int, position_encoding_type: Optional[str]='trainable', output_index_dims: Optional[int]=None, num_channels: Optional[int]=128, subsampled_index_dims: Optional[int]=None, qk_channels: Optional[int]=None, v_channels: Optional[int]=None, num_heads: Optional[int]=1, widening_factor: Optional[int]=1, use_query_residual: Optional[bool]=False, concat_preprocessed_input: Optional[bool]=False, final_project: Optional[bool]=True, position_encoding_only: Optional[bool]=False, **position_encoding_kwargs) -> None:
pass
@property
def num_query_channels(self) -> int:
pass
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
pass
def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> PerceiverDecoderOutput:
pass
| 6
| 1
| 36
| 3
| 28
| 5
| 6
| 0.46
| 1
| 9
| 3
| 0
| 4
| 13
| 4
| 37
| 184
| 18
| 115
| 51
| 86
| 53
| 63
| 27
| 58
| 9
| 4
| 2
| 22
|
4,486
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder
|
import torch
from .configuration_perceiver import PerceiverConfig
from typing import Any, Callable, Optional, Union
class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder):
"""
Cross-attention based video-autoencoding decoder. Light-weight wrapper of [*PerceiverBasicDecoder*] with video
reshaping logic.
Args:
config ([*PerceiverConfig*]):
Model configuration.
output_shape (`list[int]`):
Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension.
position_encoding_type (`str`):
The type of position encoding to use. Can be either "trainable", "fourier", or "none".
"""
def __init__(self, config: PerceiverConfig, output_shape: list[int], position_encoding_type: str, **decoder_kwargs) -> None:
super().__init__()
if len(output_shape) != 4:
raise ValueError(f'Expected rank 4 output_shape, got {output_shape}.')
self.output_shape = output_shape
self.output_num_channels = decoder_kwargs['output_num_channels']
self.decoder = PerceiverBasicDecoder(config, output_index_dims=self.output_shape[1:4], position_encoding_type=position_encoding_type, **decoder_kwargs)
@property
def num_query_channels(self) -> int:
return self.decoder.num_query_channels
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(inputs, modality_sizes=modality_sizes, inputs_without_pos=inputs_without_pos, subsampled_points=subsampled_points)
def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None) -> PerceiverDecoderOutput:
decoder_outputs = self.decoder(query, z)
logits = decoder_outputs.logits
logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]])
return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
|
class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder):
'''
Cross-attention based video-autoencoding decoder. Light-weight wrapper of [*PerceiverBasicDecoder*] with video
reshaping logic.
Args:
config ([*PerceiverConfig*]):
Model configuration.
output_shape (`list[int]`):
Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension.
position_encoding_type (`str`):
The type of position encoding to use. Can be either "trainable", "fourier", or "none".
'''
def __init__(self, config: PerceiverConfig, output_shape: list[int], position_encoding_type: str, **decoder_kwargs) -> None:
pass
@property
def num_query_channels(self) -> int:
pass
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
pass
def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None) -> PerceiverDecoderOutput:
pass
| 6
| 1
| 8
| 1
| 8
| 1
| 1
| 0.44
| 1
| 8
| 3
| 0
| 4
| 3
| 4
| 37
| 51
| 7
| 32
| 15
| 22
| 14
| 17
| 10
| 12
| 2
| 4
| 1
| 5
|
4,487
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder
|
import torch
from typing import Any, Callable, Optional, Union
class PerceiverClassificationDecoder(PerceiverAbstractDecoder):
"""
Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output.
Will turn the output of the Perceiver encoder, which is of shape (batch_size, num_latents, d_latents), into a tensor of
shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels).
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config, **decoder_kwargs):
super().__init__()
self.num_labels = config.num_labels
self.decoder = PerceiverBasicDecoder(config, output_num_channels=self.num_labels, output_index_dims=1, **decoder_kwargs)
@property
def num_query_channels(self) -> int:
return self.decoder.num_query_channels
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points)
def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> PerceiverDecoderOutput:
decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
logits = decoder_outputs.logits[:, 0, :]
return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
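A construction sketch that mirrors how this decoder is configured for the image-classification models later in this file (trainable position encodings with `num_channels=config.d_latents` and `index_dims=1`); the latents below are random stand-ins for real encoder output.
```python
import torch
from transformers import PerceiverConfig
from transformers.models.perceiver.modeling_perceiver import PerceiverClassificationDecoder

config = PerceiverConfig(num_labels=10)
decoder = PerceiverClassificationDecoder(
    config,
    num_channels=config.d_latents,
    trainable_position_encoding_kwargs={"num_channels": config.d_latents, "index_dims": 1},
    use_query_residual=True,
)

inputs = torch.randn(2, 3, 224, 224)  # only the batch size is read when building trainable queries
latents = torch.randn(2, config.num_latents, config.d_latents)  # stand-in for encoder output

query = decoder.decoder_query(inputs)
logits = decoder(query, latents).logits
print(logits.shape)  # expected: torch.Size([2, 10])
```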
|
class PerceiverClassificationDecoder(PerceiverAbstractDecoder):
'''
Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output.
Will turn the output of the Perceiver encoder, which is of shape (batch_size, num_latents, d_latents), into a tensor of
shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels).
Args:
config ([`PerceiverConfig`]):
Model configuration.
'''
def __init__(self, config, **decoder_kwargs):
pass
@property
def num_query_channels(self) -> int:
pass
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
pass
def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> PerceiverDecoderOutput:
pass
| 6
| 1
| 7
| 1
| 6
| 1
| 1
| 0.37
| 1
| 6
| 2
| 0
| 4
| 2
| 4
| 37
| 44
| 8
| 27
| 16
| 15
| 10
| 13
| 9
| 8
| 1
| 4
| 0
| 4
|
4,488
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor
|
import torch
from torch import nn
from typing import Any, Callable, Optional, Union
from .configuration_perceiver import PerceiverConfig
class PerceiverClassificationPostprocessor(nn.Module):
"""
Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits.
Args:
config ([*PerceiverConfig*]):
Model configuration.
in_channels (`int`):
Number of channels in the input.
"""
def __init__(self, config: PerceiverConfig, in_channels: int) -> None:
super().__init__()
self.classifier = nn.Linear(in_channels, config.num_labels)
def forward(self, inputs, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor:
logits = self.classifier(inputs)
return logits[:, 0, :]
|
class PerceiverClassificationPostprocessor(nn.Module):
'''
Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits.
Args:
config ([*PerceiverConfig*]):
Model configuration.
in_channels (`int`):
Number of channels in the input.
'''
def __init__(self, config: PerceiverConfig, in_channels: int) -> None:
pass
def forward(self, inputs, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor:
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 1
| 1.14
| 1
| 4
| 1
| 0
| 2
| 1
| 2
| 12
| 18
| 3
| 7
| 5
| 4
| 8
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
4,489
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverEmbeddingDecoder
|
import torch
from torch import nn
from .configuration_perceiver import PerceiverConfig
class PerceiverEmbeddingDecoder(nn.Module):
"""
Module to decode embeddings (for masked language modeling).
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config: PerceiverConfig) -> None:
super().__init__()
self.config = config
self.vocab_size = config.vocab_size
self.bias = nn.Parameter(torch.zeros(self.vocab_size))
def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor:
batch_size, seq_len, d_model = hidden_states.shape
output = torch.matmul(hidden_states.reshape([-1, d_model]), embedding_layer.weight.transpose(0, 1))
output = output + self.bias
return output.reshape([batch_size, seq_len, self.vocab_size])
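A minimal sketch of the weight-tied language-modeling head: scores are obtained by multiplying decoder states with the input embedding matrix, so only `vocab_size` is read from the config (a `SimpleNamespace` stands in for it, and the hidden size is illustrative).
```python
import torch
from torch import nn
from types import SimpleNamespace
from transformers.models.perceiver.modeling_perceiver import PerceiverEmbeddingDecoder

config = SimpleNamespace(vocab_size=262)          # stand-in for PerceiverConfig
embedding_layer = nn.Embedding(config.vocab_size, 768)
decoder = PerceiverEmbeddingDecoder(config)

hidden_states = torch.randn(2, 2048, 768)         # (batch, seq_len, d_model)
logits = decoder(hidden_states, embedding_layer=embedding_layer)
print(logits.shape)  # expected: torch.Size([2, 2048, 262])
```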
|
class PerceiverEmbeddingDecoder(nn.Module):
'''
Module to decode embeddings (for masked language modeling).
Args:
config ([`PerceiverConfig`]):
Model configuration.
'''
def __init__(self, config: PerceiverConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 6
| 1
| 5
| 1
| 1
| 0.64
| 1
| 3
| 1
| 0
| 2
| 3
| 2
| 12
| 22
| 4
| 11
| 8
| 8
| 7
| 11
| 8
| 8
| 1
| 1
| 0
| 2
|
4,490
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverEmbeddings
|
import torch
from torch import nn
class PerceiverEmbeddings(nn.Module):
"""Construct the latent embeddings."""
def __init__(self, config):
super().__init__()
self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents))
def forward(self, batch_size: int):
return self.latents.expand(batch_size, -1, -1)
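A tiny sketch: the learned latent array is broadcast over the batch dimension, so only `num_latents` and `d_latents` are read from the config (mocked with a `SimpleNamespace` here).
```python
import torch
from types import SimpleNamespace
from transformers.models.perceiver.modeling_perceiver import PerceiverEmbeddings

config = SimpleNamespace(num_latents=256, d_latents=1280)  # stand-in for PerceiverConfig
embeddings = PerceiverEmbeddings(config)

latents = embeddings(batch_size=4)
print(latents.shape)  # expected: torch.Size([4, 256, 1280])
```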
|
class PerceiverEmbeddings(nn.Module):
'''Construct the latent embeddings.'''
def __init__(self, config):
pass
def forward(self, batch_size: int):
pass
| 3
| 1
| 3
| 0
| 3
| 1
| 1
| 0.33
| 1
| 2
| 0
| 0
| 2
| 1
| 2
| 12
| 9
| 2
| 6
| 4
| 3
| 2
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
4,491
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverEncoder
|
from ...modeling_outputs import BaseModelOutputWithCrossAttentions
import torch
from torch import nn
from typing import Any, Callable, Optional, Union
class PerceiverEncoder(nn.Module):
"""The Perceiver Encoder: a scalable, fully attentional encoder."""
def __init__(self, config, kv_dim=None):
super().__init__()
self.config = config
if config.d_latents % config.num_self_attention_heads != 0:
raise ValueError(f'num_z_channels ({config.d_latents}) must be divisible by num_self_attend_heads ({config.num_self_attention_heads}).')
if config.d_latents % config.num_cross_attention_heads != 0:
raise ValueError(f'num_z_channels ({config.d_latents}) must be divisible by num_cross_attend_heads ({config.num_cross_attention_heads}).')
self.cross_attention = PerceiverLayer(config, is_cross_attention=True, qk_channels=config.qk_channels, v_channels=config.v_channels, num_heads=config.num_cross_attention_heads, q_dim=config.d_latents, kv_dim=kv_dim, widening_factor=config.cross_attention_widening_factor, use_query_residual=config.use_query_residual)
self_attention_layers = []
for _ in range(config.num_self_attends_per_block):
layer = PerceiverLayer(config, is_cross_attention=False, qk_channels=config.qk_channels, v_channels=config.v_channels, num_heads=config.num_self_attention_heads, q_dim=config.d_latents, kv_dim=config.d_latents, widening_factor=config.self_attention_widening_factor)
self_attention_layers.append(layer)
self.self_attends = nn.ModuleList(self_attention_layers)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs: Optional[torch.FloatTensor]=None, inputs_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutputWithCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
layer_outputs = self.cross_attention(hidden_states, attention_mask=attention_mask, head_mask=None, inputs=inputs, inputs_mask=inputs_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_cross_attentions = all_cross_attentions + (layer_outputs[1],)
for _ in range(self.config.num_blocks):
for i, layer_module in enumerate(self.self_attends):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
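A rough end-to-end sketch of the encoder on its own: the latent queries cross-attend to the preprocessed inputs once, then pass through the self-attention blocks. The config below shrinks the default number of self-attention layers purely to keep the sketch lightweight; all keyword arguments are assumed to be standard `PerceiverConfig` fields.
```python
import torch
from transformers import PerceiverConfig
from transformers.models.perceiver.modeling_perceiver import PerceiverEmbeddings, PerceiverEncoder

config = PerceiverConfig(num_self_attends_per_block=2, num_blocks=1)
embeddings = PerceiverEmbeddings(config)
encoder = PerceiverEncoder(config, kv_dim=config.d_model)

inputs = torch.randn(2, 128, config.d_model)  # preprocessed inputs (keys/values)
latents = embeddings(batch_size=2)            # learned latent queries

outputs = encoder(latents, inputs=inputs)
print(outputs.last_hidden_state.shape)  # expected: torch.Size([2, 256, 1280])
```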
|
class PerceiverEncoder(nn.Module):
'''The Perceiver Encoder: a scalable, fully attentional encoder.'''
def __init__(self, config, kv_dim=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs: Optional[torch.FloatTensor]=None, inputs_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutputWithCrossAttentions]:
pass
| 3
| 1
| 55
| 6
| 46
| 3
| 8
| 0.08
| 1
| 9
| 2
| 0
| 2
| 3
| 2
| 12
| 113
| 14
| 92
| 26
| 79
| 7
| 36
| 16
| 33
| 12
| 1
| 3
| 16
|
4,492
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py
|
transformers.models.perceiver.modeling_perceiver.PerceiverForImageClassificationConvProcessing
|
import torch
from typing import Any, Callable, Optional, Union
from ...utils import ModelOutput, auto_docstring, logging, torch_int
@auto_docstring(custom_intro='\n Example use of Perceiver for image classification, for tasks such as ImageNet.\n\n This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy\n of 82.1 on ImageNet.\n\n [`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]\n (with `prep_type="conv"`) to preprocess the input images, and\n [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of\n [`PerceiverModel`] into classification logits.\n ')
class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
fourier_position_encoding_kwargs_preprocessor = {'concat_pos': True, 'max_resolution': (56, 56), 'num_bands': 64, 'sine_only': False}
trainable_position_encoding_kwargs_decoder = {'num_channels': config.d_latents, 'index_dims': 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(config, input_preprocessor=PerceiverImagePreprocessor(config, prep_type='conv', spatial_downsample=1, position_encoding_type='fourier', fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor), decoder=PerceiverClassificationDecoder(config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True))
self.post_init()
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverClassifierOutput]:
"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationConvProcessing
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
>>> model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```"""
if inputs is not None and pixel_values is not None:
raise ValueError('You cannot use both `inputs` and `pixel_values`')
elif inputs is None and pixel_values is not None:
inputs = pixel_values
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return PerceiverClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n Example use of Perceiver for image classification, for tasks such as ImageNet.\n\n This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy\n of 82.1 on ImageNet.\n\n [`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]\n (with `prep_type="conv"`) to preprocess the input images, and\n [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of\n [`PerceiverModel`] into classification logits.\n ')
class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverClassifierOutput]:
'''
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationConvProcessing
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
>>> model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```'''
pass
| 5 | 1 | 62 | 8 | 42 | 13 | 8 | 0.3 | 1 | 9 | 4 | 0 | 2 | 2 | 2 | 3 | 128 | 16 | 86 | 23 | 71 | 26 | 35 | 12 | 32 | 15 | 2 | 3 | 16
|
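The record above describes a 2D conv+maxpool preprocessing network (`prep_type="conv"`), but the downsampling stem itself is not part of the record. Below is a minimal sketch of such a stem, assuming a ResNet-style 7x7 strided convolution followed by max pooling; the class name and layer choices are illustrative rather than the library's implementation.

```python
import torch
import torch.nn as nn


class ConvMaxPoolStem(nn.Module):
    """Illustrative conv+maxpool stem in the spirit of `prep_type="conv"`:
    a strided 7x7 convolution followed by max pooling, reducing a 224x224
    image to a 56x56 grid of 64-channel features."""

    def __init__(self, in_channels: int = 3, out_channels: int = 64):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False)
        self.norm = nn.BatchNorm2d(out_channels)
        self.act = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        return self.pool(self.act(self.norm(self.conv(pixel_values))))


print(ConvMaxPoolStem()(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 64, 56, 56])
```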
4,493 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py | transformers.models.perceiver.modeling_perceiver.PerceiverForImageClassificationFourier |
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
from typing import Any, Callable, Optional, Union
@auto_docstring(custom_intro='\n Example use of Perceiver for image classification, for tasks such as ImageNet.\n\n This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of\n 79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).\n\n [`PerceiverForImageClassificationFourier`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]\n (with `prep_type="pixels"`) to preprocess the input images, and\n [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of\n [`PerceiverModel`] into classification logits.\n ')
class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
fourier_position_encoding_kwargs_preprocessor = {'concat_pos': True, 'max_resolution': (224, 224), 'num_bands': 64, 'sine_only': False}
trainable_position_encoding_kwargs_decoder = {'num_channels': config.d_latents, 'index_dims': 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(config, input_preprocessor=PerceiverImagePreprocessor(config, prep_type='pixels', spatial_downsample=1, fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor), decoder=PerceiverClassificationDecoder(config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True))
self.post_init()
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverClassifierOutput]:
"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationFourier
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-fourier")
>>> model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```"""
if inputs is not None and pixel_values is not None:
raise ValueError('You cannot use both `inputs` and `pixel_values`')
elif inputs is None and pixel_values is not None:
inputs = pixel_values
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return PerceiverClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n Example use of Perceiver for image classification, for tasks such as ImageNet.\n\n This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of\n 79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).\n\n [`PerceiverForImageClassificationFourier`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]\n (with `prep_type="pixels"`) to preprocess the input images, and\n [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of\n [`PerceiverModel`] into classification logits.\n ')
class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverClassifierOutput]:
'''
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationFourier
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-fourier")
>>> model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```'''
pass
| 5 | 1 | 62 | 8 | 41 | 13 | 8 | 0.31 | 1 | 9 | 4 | 0 | 2 | 2 | 2 | 3 | 127 | 16 | 85 | 23 | 70 | 26 | 35 | 12 | 32 | 15 | 2 | 3 | 16
|
4,494 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py | transformers.models.perceiver.modeling_perceiver.PerceiverForImageClassificationLearned |
import torch
from typing import Any, Callable, Optional, Union
from ...utils import ModelOutput, auto_docstring, logging, torch_int
@auto_docstring(custom_intro='\n Example use of Perceiver for image classification, for tasks such as ImageNet.\n\n This model uses learned position embeddings. In other words, this model is not given any privileged information about\n the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet.\n\n [`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]\n (with `prep_type="conv1x1"`) to preprocess the input images, and\n [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of\n [`PerceiverModel`] into classification logits.\n ')
class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
trainable_position_encoding_kwargs_preprocessor = {'num_channels': 256, 'index_dims': config.image_size ** 2}
trainable_position_encoding_kwargs_decoder = {'num_channels': config.d_latents, 'index_dims': 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(config, input_preprocessor=PerceiverImagePreprocessor(config, prep_type='conv1x1', spatial_downsample=1, out_channels=256, position_encoding_type='trainable', concat_or_add_pos='concat', project_pos_dim=256, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_preprocessor), decoder=PerceiverClassificationDecoder(config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True))
self.post_init()
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverClassifierOutput]:
"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationLearned
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
>>> model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```"""
if inputs is not None and pixel_values is not None:
raise ValueError('You cannot use both `inputs` and `pixel_values`')
elif inputs is None and pixel_values is not None:
inputs = pixel_values
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return PerceiverClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n Example use of Perceiver for image classification, for tasks such as ImageNet.\n\n This model uses learned position embeddings. In other words, this model is not given any privileged information about\n the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet.\n\n [`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]\n (with `prep_type="conv1x1"`) to preprocess the input images, and\n [`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of\n [`PerceiverModel`] into classification logits.\n ')
class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverClassifierOutput]:
'''
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationLearned
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
>>> model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```'''
pass
| 5 | 1 | 63 | 8 | 42 | 13 | 8 | 0.3 | 1 | 9 | 4 | 0 | 2 | 2 | 2 | 3 | 129 | 17 | 86 | 24 | 70 | 26 | 35 | 12 | 32 | 15 | 2 | 3 | 16
|
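Unlike the conv and Fourier variants above, this `forward` exposes an `interpolate_pos_encoding` flag. A minimal usage sketch at a non-default resolution follows; the 384x384 size is an assumption made for illustration, and whether a given checkpoint tolerates that resolution is not guaranteed by this record.

```python
import torch
from transformers import PerceiverForImageClassificationLearned

model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")
model.eval()

# Illustrative 384x384 input (the checkpoint's learned position grid is 224x224);
# interpolate_pos_encoding=True asks the model to resize its learned position
# embeddings to the new spatial resolution instead of failing on the size mismatch.
pixel_values = torch.randn(1, 3, 384, 384)

with torch.no_grad():
    outputs = model(inputs=pixel_values, interpolate_pos_encoding=True)
print(list(outputs.logits.shape))  # expected: [1, 1000]
```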
4,495 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py | transformers.models.perceiver.modeling_perceiver.PerceiverForMaskedLM |
from .configuration_perceiver import PerceiverConfig
import torch
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Any, Callable, Optional, Union
@auto_docstring(custom_intro='\n Example use of Perceiver for masked language modeling.\n ')
class PerceiverForMaskedLM(PerceiverPreTrainedModel):
def __init__(self, config: PerceiverConfig):
super().__init__(config)
text_preprocessor = PerceiverTextPreprocessor(config)
trainable_position_encoding_kwargs_decoder = {'num_channels': text_preprocessor.num_channels, 'index_dims': config.max_position_embeddings}
self.perceiver = PerceiverModel(config, input_preprocessor=text_preprocessor, decoder=PerceiverBasicDecoder(config, output_num_channels=config.d_latents, output_index_dims=config.max_position_embeddings, num_channels=text_preprocessor.num_channels, qk_channels=8 * 32, v_channels=text_preprocessor.num_channels, num_heads=8, use_query_residual=False, final_project=False, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder))
self.embedding_decoder = PerceiverEmbeddingDecoder(config)
self.post_init()
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, input_ids: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverMaskedLMOutput]:
"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, PerceiverForMaskedLM
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
>>> model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")
>>> # training
>>> text = "This is an incomplete sentence where some words are missing."
>>> inputs = tokenizer(text, padding="max_length", return_tensors="pt")
>>> # mask " missing."
>>> inputs["input_ids"][0, 52:61] = tokenizer.mask_token_id
>>> labels = tokenizer(text, padding="max_length", return_tensors="pt").input_ids
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> round(loss.item(), 2)
19.87
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2048, 262]
>>> # inference
>>> text = "This is an incomplete sentence where some words are missing."
>>> encoding = tokenizer(text, padding="max_length", return_tensors="pt")
>>> # mask bytes corresponding to " missing.". Note that the model performs much better if the masked span starts with a space.
>>> encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id
>>> # forward pass
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2048, 262]
>>> masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist()
>>> tokenizer.decode(masked_tokens_predictions)
' missing.'
```"""
if inputs is not None and input_ids is not None:
raise ValueError('You cannot use both `inputs` and `input_ids`')
elif inputs is None and input_ids is not None:
inputs = input_ids
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
logits = self.embedding_decoder(outputs.logits if return_dict else outputs[0], embedding_layer=self.perceiver.input_preprocessor.embeddings)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return PerceiverMaskedLMOutput(loss=masked_lm_loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n Example use of Perceiver for masked language modeling.\n ')
class PerceiverForMaskedLM(PerceiverPreTrainedModel):
def __init__(self, config: PerceiverConfig):
pass
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, input_ids: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverMaskedLMOutput]:
'''
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, PerceiverForMaskedLM
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
>>> model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")
>>> # training
>>> text = "This is an incomplete sentence where some words are missing."
>>> inputs = tokenizer(text, padding="max_length", return_tensors="pt")
>>> # mask " missing."
>>> inputs["input_ids"][0, 52:61] = tokenizer.mask_token_id
>>> labels = tokenizer(text, padding="max_length", return_tensors="pt").input_ids
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> round(loss.item(), 2)
19.87
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2048, 262]
>>> # inference
>>> text = "This is an incomplete sentence where some words are missing."
>>> encoding = tokenizer(text, padding="max_length", return_tensors="pt")
>>> # mask bytes corresponding to " missing.". Note that the model performs much better if the masked span starts with a space.
>>> encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id
>>> # forward pass
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2048, 262]
>>> masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist()
>>> tokenizer.decode(masked_tokens_predictions)
' missing.'
```'''
pass
| 5 | 1 | 64 | 11 | 33 | 22 | 5 | 0.62 | 1 | 10 | 6 | 0 | 2 | 2 | 2 | 3 | 132 | 22 | 69 | 23 | 54 | 43 | 23 | 12 | 20 | 8 | 2 | 1 | 9
|
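`PerceiverEmbeddingDecoder`, which turns the decoder output back into vocabulary logits in the code above, is not included in this record. A minimal sketch of what such a tied-embedding decoder typically computes follows; the class name and bias handling are illustrative, not the library's code.

```python
import torch
import torch.nn as nn


class TiedEmbeddingDecoder(nn.Module):
    """Illustrative stand-in for an embedding decoder: project decoder outputs
    onto the vocabulary by reusing the input embedding matrix, plus a bias."""

    def __init__(self, vocab_size: int):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(vocab_size))

    def forward(self, hidden_states: torch.Tensor, embedding_layer: nn.Embedding) -> torch.Tensor:
        batch_size, seq_len, d_model = hidden_states.shape
        # (batch * seq, d_model) @ (d_model, vocab) -> (batch * seq, vocab)
        logits = torch.matmul(hidden_states.reshape(-1, d_model), embedding_layer.weight.T) + self.bias
        return logits.reshape(batch_size, seq_len, -1)


# Usage sketch with the byte vocabulary size (262) and an assumed hidden size of 768:
decoder = TiedEmbeddingDecoder(vocab_size=262)
embeddings = nn.Embedding(262, 768)
hidden = torch.randn(1, 2048, 768)
print(decoder(hidden, embeddings).shape)  # torch.Size([1, 2048, 262])
```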
4,496 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py | transformers.models.perceiver.modeling_perceiver.PerceiverForMultimodalAutoencoding |
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
from typing import Any, Callable, Optional, Union
from .configuration_perceiver import PerceiverConfig
@auto_docstring(custom_intro='\n Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700.\n\n [`PerceiverForMultimodalAutoencoding`] uses [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor`] to\n preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to\n preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad\n each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies\n the Perceiver encoder.\n\n [`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] is used to decode the latent representation of\n [`PerceiverModel`]. This decoder uses each modality-specific decoder to construct queries. The decoder queries are\n created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is\n computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent\n representation. This is determined by the subsampled indices for each modality, which can be provided as additional\n input to the forward pass of [`PerceiverForMultimodalAutoencoding`].\n\n [`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] also pads the decoder queries of the different\n modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention\n is performed with the latent representation of [`PerceiverModel`].\n\n Finally, [`~models.perceiver.modeling_perceiver.PerceiverMultiModalPostprocessor`] is used to turn this tensor into an\n actual video. It first splits up the output into the different modalities, and then applies the respective\n postprocessor for each modality.\n\n Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the\n "label" modality), this auto-encoding model becomes a Kinetics 700 video classifier.\n ')
class PerceiverForMultimodalAutoencoding(PerceiverPreTrainedModel):
def __init__(self, config: PerceiverConfig):
super().__init__(config)
n_audio_samples = config.num_frames * config.audio_samples_per_frame
input_preprocessor = PerceiverMultimodalPreprocessor(min_padding_size=4, modalities={'audio': PerceiverAudioPreprocessor(config, position_encoding_type='fourier', fourier_position_encoding_kwargs={'num_bands': 192, 'max_resolution': (n_audio_samples,), 'sine_only': False, 'concat_pos': True}, prep_type='patches', samples_per_patch=config.samples_per_patch), 'image': PerceiverImagePreprocessor(config, position_encoding_type='fourier', fourier_position_encoding_kwargs={'num_bands': 32, 'max_resolution': (config.num_frames, config.image_size, config.image_size), 'sine_only': False, 'concat_pos': True}, prep_type='patches', spatial_downsample=4, temporal_downsample=1), 'label': PerceiverOneHotPreprocessor(config)}, mask_probs={'image': 0.0, 'audio': 0.0, 'label': 1.0})
image_decoder = PerceiverBasicVideoAutoencodingDecoder(config, concat_preprocessed_input=False, output_shape=config.output_shape, output_num_channels=config.output_num_channels, use_query_residual=False, position_encoding_only=True, position_encoding_type='fourier', fourier_position_encoding_kwargs={'num_bands': 32, 'max_resolution': (config.num_frames, config.image_size, config.image_size), 'sine_only': False, 'concat_pos': True})
decoder = PerceiverMultimodalDecoder(config, concat_preprocessed_input=False, modalities={'audio': PerceiverBasicDecoder(config, concat_preprocessed_input=False, output_index_dims=(n_audio_samples // config.samples_per_patch,), output_num_channels=config.output_num_channels, use_query_residual=False, position_encoding_only=True, position_encoding_type='fourier', fourier_position_encoding_kwargs={'num_bands': 192, 'max_resolution': (n_audio_samples,), 'sine_only': False, 'concat_pos': True}), 'image': image_decoder, 'label': PerceiverClassificationDecoder(config, concat_preprocessed_input=False, use_query_residual=False, position_encoding_only=True, position_encoding_type='trainable', trainable_position_encoding_kwargs={'num_channels': config._label_trainable_num_channels, 'index_dims': 1})}, num_outputs=None, output_num_channels=config.output_num_channels, use_query_residual=False)
output_postprocessor = PerceiverMultimodalPostprocessor(modalities={'audio': PerceiverAudioPostprocessor(config, in_channels=config.output_num_channels), 'image': PerceiverProjectionPostprocessor(in_channels=config.output_num_channels, out_channels=3), 'label': PerceiverClassificationPostprocessor(config, in_channels=config.output_num_channels)})
self.perceiver = PerceiverModel(config, input_preprocessor=input_preprocessor, decoder=decoder, output_postprocessor=output_postprocessor)
self.post_init()
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, subsampled_output_points: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, PerceiverClassifierOutput]:
"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
subsampled_output_points (`dict[str, torch.Tensor]`, *optional*):
Dictionary of tensors used as queries for the decoder. The decoder maps these queries to the latent
representation of the model. Used for subsampled decoding, e.g. when only decoding certain image patches.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import PerceiverForMultimodalAutoencoding
>>> import torch
>>> import numpy as np
>>> # create multimodal inputs
>>> images = torch.randn((1, 16, 3, 224, 224))
>>> audio = torch.randn((1, 30720, 1))
>>> inputs = dict(image=images, audio=audio, label=torch.zeros((images.shape[0], 700)))
>>> model = PerceiverForMultimodalAutoencoding.from_pretrained("deepmind/multimodal-perceiver")
>>> # in the Perceiver IO paper, videos are auto-encoded in chunks
>>> # each chunk subsamples different index dimensions of the image and audio modality decoder queries
>>> nchunks = 128
>>> image_chunk_size = np.prod((16, 224, 224)) // nchunks
>>> audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks
>>> # process the first chunk
>>> chunk_idx = 0
>>> subsampling = {
... "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)),
... "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)),
... "label": None,
... }
>>> outputs = model(inputs=inputs, subsampled_output_points=subsampling)
>>> logits = outputs.logits
>>> list(logits["audio"].shape)
[1, 240]
>>> list(logits["image"].shape)
[1, 6272, 3]
>>> list(logits["label"].shape)
[1, 700]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
if labels is not None:
raise NotImplementedError('Multimodal autoencoding training is not yet supported')
outputs = self.perceiver(inputs=inputs, attention_mask=attention_mask, subsampled_output_points=subsampled_output_points, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
logits = outputs.logits if return_dict else outputs[0]
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return PerceiverClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
| null | 5 | 1 | 100 | 10 | 68 | 22 | 4 | 0.32 | 1 | 20 | 15 | 0 | 2 | 1 | 2 | 3 | 203 | 21 | 138 | 24 | 123 | 44 | 21 | 13 | 18 | 6 | 2 | 1 | 7
|
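The docstring example above only decodes chunk 0 of the subsampled decoder queries. Below is a sketch of decoding every chunk and stitching the reconstructions back together; it reuses the sizes from that example and assumes the per-chunk output shapes quoted there ([1, 6272, 3] for image, [1, 240] for audio).

```python
import numpy as np
import torch
from transformers import PerceiverForMultimodalAutoencoding

model = PerceiverForMultimodalAutoencoding.from_pretrained("deepmind/multimodal-perceiver")
images = torch.randn((1, 16, 3, 224, 224))
audio = torch.randn((1, 30720, 1))
inputs = dict(image=images, audio=audio, label=torch.zeros((images.shape[0], 700)))

nchunks = 128
image_chunk_size = np.prod((16, 224, 224)) // nchunks
audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks

image_parts, audio_parts = [], []
with torch.no_grad():
    for chunk_idx in range(nchunks):
        subsampling = {
            "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)),
            "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)),
            "label": None,
        }
        logits = model(inputs=inputs, subsampled_output_points=subsampling).logits
        image_parts.append(logits["image"])
        audio_parts.append(logits["audio"])

# Concatenating the per-chunk reconstructions covers the full video and waveform.
video = torch.cat(image_parts, dim=1).reshape(1, 16, 224, 224, 3)  # all 16 frames
waveform = torch.cat(audio_parts, dim=1)                           # [1, 30720]
```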
4,497 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py | transformers.models.perceiver.modeling_perceiver.PerceiverForOpticalFlow |
import torch
from typing import Any, Callable, Optional, Union
from ...utils import ModelOutput, auto_docstring, logging, torch_int
@auto_docstring(custom_intro='\n Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. [`PerceiverForOpticalFlow`] uses\n [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with *prep_type="patches"*) to preprocess the\n input images, and [`~models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder`] to decode the latent\n representation of [`PerceiverModel`].\n\n As input, one concatenates 2 subsequent frames along the channel dimension and extracts a 3 x 3 patch around each pixel\n (leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position\n of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation\n using the same encoding used for the input.\n ')
class PerceiverForOpticalFlow(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
fourier_position_encoding_kwargs_preprocessor = {'num_bands': 64, 'max_resolution': config.train_size, 'sine_only': False, 'concat_pos': True}
fourier_position_encoding_kwargs_decoder = {'concat_pos': True, 'max_resolution': config.train_size, 'num_bands': 64, 'sine_only': False}
image_preprocessor = PerceiverImagePreprocessor(config, prep_type='patches', spatial_downsample=1, conv_after_patching=True, conv_after_patching_in_channels=54, temporal_downsample=2, position_encoding_type='fourier', fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor)
self.perceiver = PerceiverModel(config, input_preprocessor=image_preprocessor, decoder=PerceiverOpticalFlowDecoder(config, num_channels=image_preprocessor.num_channels, output_image_shape=config.train_size, rescale_factor=100.0, use_query_residual=False, output_num_channels=2, position_encoding_type='fourier', fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder))
self.post_init()
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, PerceiverClassifierOutput]:
"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import PerceiverForOpticalFlow
>>> import torch
>>> model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver")
>>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,
>>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)
>>> # patches have shape (batch_size, num_frames, num_channels, height, width)
>>> # the authors train on resolutions of 368 x 496
>>> patches = torch.randn(1, 2, 27, 368, 496)
>>> outputs = model(inputs=patches)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 368, 496, 2]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
if labels is not None:
raise NotImplementedError('Optical flow training is not yet supported')
outputs = self.perceiver(inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
logits = outputs.logits if return_dict else outputs[0]
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return PerceiverClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. [`PerceiverForOpticalFlow`] uses\n [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with *prep_type="patches"*) to preprocess the\n input images, and [`~models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder`] to decode the latent\n representation of [`PerceiverModel`].\n\n As input, one concatenates 2 subsequent frames along the channel dimension and extracts a 3 x 3 patch around each pixel\n (leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position\n of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation\n using the same encoding used for the input.\n ')
class PerceiverForOpticalFlow(PerceiverPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, PerceiverClassifierOutput]:
'''
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import PerceiverForOpticalFlow
>>> import torch
>>> model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver")
>>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,
>>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)
>>> # patches have shape (batch_size, num_frames, num_channels, height, width)
>>> # the authors train on resolutions of 368 x 496
>>> patches = torch.randn(1, 2, 27, 368, 496)
>>> outputs = model(inputs=patches)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 368, 496, 2]
```'''
pass
| 5 | 1 | 55 | 7 | 36 | 12 | 4 | 0.32 | 1 | 8 | 4 | 0 | 2 | 1 | 2 | 3 | 113 | 14 | 75 | 21 | 61 | 24 | 19 | 11 | 16 | 6 | 2 | 1 | 7
|
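The intro above says the model's input is built by taking a 3 x 3 neighbourhood around every pixel of two consecutive frames, but the patch-extraction step is not part of this record. A minimal sketch follows, producing the `(1, 2, 27, 368, 496)` tensor used in the docstring example; the function name and implementation are illustrative (the image preprocessor's `temporal_downsample=2` then folds the two frames into 54 channels).

```python
import torch
import torch.nn.functional as F


def extract_3x3_patches(frame1: torch.Tensor, frame2: torch.Tensor) -> torch.Tensor:
    """Illustrative patch extraction: for each pixel, gather its 3x3 neighbourhood
    (3 channels * 9 positions = 27 values) for each of the two frames.

    frame1, frame2: (batch, 3, H, W) -> (batch, 2, 27, H, W)
    """
    per_frame = []
    for frame in (frame1, frame2):
        padded = F.pad(frame, (1, 1, 1, 1))                         # (B, 3, H+2, W+2)
        windows = padded.unfold(2, 3, 1).unfold(3, 3, 1)            # (B, 3, H, W, 3, 3)
        windows = windows.permute(0, 1, 4, 5, 2, 3).flatten(1, 3)   # (B, 27, H, W)
        per_frame.append(windows)
    return torch.stack(per_frame, dim=1)                            # (B, 2, 27, H, W)


# Usage sketch at the training resolution quoted above (368 x 496):
frame1 = torch.randn(1, 3, 368, 496)
frame2 = torch.randn(1, 3, 368, 496)
print(extract_3x3_patches(frame1, frame2).shape)  # torch.Size([1, 2, 27, 368, 496])
```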
4,498 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py | transformers.models.perceiver.modeling_perceiver.PerceiverForSequenceClassification |
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Any, Callable, Optional, Union
from ...utils import ModelOutput, auto_docstring, logging, torch_int
@auto_docstring(custom_intro='\n Example use of Perceiver for text classification.\n ')
class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
trainable_position_encoding_kwargs_decoder = {'num_channels': config.d_latents, 'index_dims': 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(config, input_preprocessor=PerceiverTextPreprocessor(config), decoder=PerceiverClassificationDecoder(config, num_channels=config.d_latents, trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder, use_query_residual=True))
self.post_init()
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, input_ids: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverClassifierOutput]:
"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels -
1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels >
1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoTokenizer, PerceiverForSequenceClassification
>>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
>>> model = PerceiverForSequenceClassification.from_pretrained("deepmind/language-perceiver")
>>> text = "hello world"
>>> inputs = tokenizer(text, return_tensors="pt").input_ids
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2]
```"""
if inputs is not None and input_ids is not None:
raise ValueError('You cannot use both `inputs` and `input_ids`')
elif inputs is None and input_ids is not None:
inputs = input_ids
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(inputs=inputs, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return PerceiverClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n Example use of Perceiver for text classification.\n ')
class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, inputs: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, input_ids: Optional[torch.Tensor]=None) -> Union[tuple, PerceiverClassifierOutput]:
'''
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels -
1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels >
1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoTokenizer, PerceiverForSequenceClassification
>>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
>>> model = PerceiverForSequenceClassification.from_pretrained("deepmind/language-perceiver")
>>> text = "hello world"
>>> inputs = tokenizer(text, return_tensors="pt").input_ids
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2]
```'''
pass
| 5 | 1 | 53 | 8 | 36 | 10 | 8 | 0.26 | 1 | 9 | 4 | 0 | 2 | 2 | 2 | 3 | 109 | 16 | 74 | 22 | 59 | 19 | 34 | 11 | 31 | 15 | 2 | 3 | 16
|
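The loss dispatch in the `forward` above covers regression, single-label and multi-label targets. A minimal sketch of exercising the multi-label branch with a randomly initialised model follows; the label count and target values are made up for illustration, and the pretrained checkpoint is only used for its tokenizer.

```python
import torch
from transformers import AutoTokenizer, PerceiverConfig, PerceiverForSequenceClassification

# Float multi-hot targets plus problem_type="multi_label_classification"
# route the loss computation to the BCEWithLogitsLoss branch shown above.
config = PerceiverConfig(num_labels=5, problem_type="multi_label_classification")
model = PerceiverForSequenceClassification(config)

tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
inputs = tokenizer("hello world", return_tensors="pt").input_ids
labels = torch.tensor([[1.0, 0.0, 1.0, 0.0, 0.0]])  # made-up multi-hot target

outputs = model(inputs=inputs, labels=labels)
print(outputs.loss, list(outputs.logits.shape))  # scalar loss, [1, 5]
```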
4,499 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/perceiver/modeling_perceiver.py | transformers.models.perceiver.modeling_perceiver.PerceiverFourierPositionEncoding |
import torch
from typing import Any, Callable, Optional, Union
class PerceiverFourierPositionEncoding(PerceiverAbstractPositionEncoding):
"""Fourier (Sinusoidal) position encoding."""
def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False):
super().__init__()
self.num_bands = num_bands
self.max_resolution = max_resolution
self.concat_pos = concat_pos
self.sine_only = sine_only
@property
def num_dimensions(self) -> int:
return len(self.max_resolution)
def output_size(self):
"""Returns size of positional encodings last dimension."""
num_dims = len(self.max_resolution)
encoding_size = self.num_bands * num_dims
if not self.sine_only:
encoding_size *= 2
if self.concat_pos:
encoding_size += self.num_dimensions
return encoding_size
def forward(self, index_dims: list[int], batch_size: int, device: torch.device, dtype: torch.dtype, pos: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:
pos = _check_or_build_spatial_positions(pos, index_dims, batch_size)
fourier_pos_enc = generate_fourier_features(pos, num_bands=self.num_bands, max_resolution=self.max_resolution, concat_pos=self.concat_pos, sine_only=self.sine_only).to(device=device, dtype=dtype)
return fourier_pos_enc
|
class PerceiverFourierPositionEncoding(PerceiverAbstractPositionEncoding):
'''Fourier (Sinusoidal) position encoding.'''
def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False):
pass
@property
def num_dimensions(self) -> int:
pass
def output_size(self):
'''Returns size of positional encodings last dimension.'''
pass
def forward(self, index_dims: list[int], batch_size: int, device: torch.device, dtype: torch.dtype, pos: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:
pass
| 6 | 2 | 9 | 0 | 8 | 0 | 2 | 0.06 | 1 | 2 | 0 | 0 | 4 | 4 | 4 | 37 | 42 | 5 | 35 | 20 | 22 | 2 | 21 | 12 | 16 | 3 | 4 | 1 | 6
|
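`generate_fourier_features` and `_check_or_build_spatial_positions`, which the class above relies on, are not part of this record. The sketch below is a self-contained approximation of the feature map it produces, assuming positions normalised to [-1, 1] and linearly spaced frequency bands up to the Nyquist frequency of each dimension; the helper name and details are illustrative, not the library function.

```python
import torch


def fourier_features(pos: torch.Tensor, num_bands: int, max_resolution: tuple,
                     concat_pos: bool = True, sine_only: bool = False) -> torch.Tensor:
    """Illustrative Fourier position features.

    pos: (batch, n_points, n_dims) positions in [-1, 1].
    The output width follows the same bookkeeping as output_size() above:
    num_bands * n_dims, doubled for sin+cos, plus n_dims if concat_pos.
    """
    # One set of linearly spaced frequencies per spatial dimension.
    freq_bands = torch.stack(
        [torch.linspace(1.0, res / 2, steps=num_bands) for res in max_resolution], dim=0
    )  # (n_dims, num_bands)

    per_pos = (pos[..., None] * freq_bands[None, None, :, :]).flatten(-2)  # (batch, n_points, n_dims * num_bands)

    if sine_only:
        encoded = torch.sin(torch.pi * per_pos)
    else:
        encoded = torch.cat([torch.sin(torch.pi * per_pos), torch.cos(torch.pi * per_pos)], dim=-1)
    if concat_pos:
        encoded = torch.cat([pos, encoded], dim=-1)
    return encoded


# Usage sketch for a 224x224 grid with 64 bands, as used by the image preprocessors above:
ys, xs = torch.meshgrid(torch.linspace(-1, 1, 224), torch.linspace(-1, 1, 224), indexing="ij")
pos = torch.stack([ys, xs], dim=-1).reshape(1, -1, 2)
print(fourier_features(pos, num_bands=64, max_resolution=(224, 224)).shape)  # (1, 50176, 258)
```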