Dataset columns (name, dtype, and observed value range or string length):

| column | dtype | range / length |
|---|---|---|
| id | int64 | 0 to 328k |
| repository_name | string | lengths 7 to 58 |
| file_path | string | lengths 9 to 302 |
| class_name | string | lengths 5 to 256 |
| human_written_code | string | lengths 16 to 2.16M |
| class_skeleton | string (nullable) | lengths 18 to 1.49M |
| total_program_units | int64 | 1 to 1.76k |
| total_doc_str | int64 | 0 to 771 |
| AvgCountLine | float64 | 0 to 7.89k |
| AvgCountLineBlank | float64 | 0 to 297 |
| AvgCountLineCode | float64 | 0 to 7.89k |
| AvgCountLineComment | float64 | 0 to 7.89k |
| AvgCyclomatic | float64 | 0 to 130 |
| CommentToCodeRatio | float64 | 0 to 168 |
| CountClassBase | float64 | 0 to 40 |
| CountClassCoupled | float64 | 0 to 583 |
| CountClassCoupledModified | float64 | 0 to 575 |
| CountClassDerived | float64 | 0 to 5.35k |
| CountDeclInstanceMethod | float64 | 0 to 529 |
| CountDeclInstanceVariable | float64 | 0 to 296 |
| CountDeclMethod | float64 | 0 to 599 |
| CountDeclMethodAll | float64 | 0 to 1.12k |
| CountLine | float64 | 1 to 40.4k |
| CountLineBlank | float64 | 0 to 8.16k |
| CountLineCode | float64 | 1 to 25.7k |
| CountLineCodeDecl | float64 | 1 to 8.15k |
| CountLineCodeExe | float64 | 0 to 24.2k |
| CountLineComment | float64 | 0 to 16.5k |
| CountStmt | float64 | 1 to 9.71k |
| CountStmtDecl | float64 | 1 to 8.15k |
| CountStmtExe | float64 | 0 to 9.69k |
| MaxCyclomatic | float64 | 0 to 759 |
| MaxInheritanceTree | float64 | 0 to 16 |
| MaxNesting | float64 | 0 to 34 |
| SumCyclomatic | float64 | 0 to 2.9k |

One record per class follows; code fields are shown verbatim.

id: 5,500
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
class_name: transformers.models.swin2sr.modeling_swin2sr.Swin2SRSelfAttention
human_written_code:
from typing import Optional, Union
import math
from torch import nn
import collections.abc
import torch
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
class Swin2SRSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]):
super().__init__()
if dim % num_heads != 0:
raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})')
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
self.pretrained_window_size = pretrained_window_size
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
self.continuous_position_bias_mlp = nn.Sequential(nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False))
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.int64).float()
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.int64).float()
relative_coords_table = torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing='ij')).permute(1, 2, 0).contiguous().unsqueeze(0)
if pretrained_window_size[0] > 0:
relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1
relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1
elif window_size > 1:
relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
relative_coords_table *= 8
relative_coords_table = torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8)
relative_coords_table = relative_coords_table.to(next(self.continuous_position_bias_mlp.parameters()).dtype)
self.register_buffer('relative_coords_table', relative_coords_table, persistent=False)
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing='ij'))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer('relative_position_index', relative_position_index, persistent=False)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize(key_layer, dim=-1).transpose(-2, -1)
logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp()
attention_scores = attention_scores * logit_scale
relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view(-1, self.num_attention_heads)
relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim) + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class_skeleton:
class Swin2SRSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
total_program_units=3, total_doc_str=0, AvgCountLine=41, AvgCountLineBlank=4, AvgCountLineCode=33, AvgCountLineComment=5, AvgCyclomatic=3, CommentToCodeRatio=0.15, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=11, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=127, CountLineBlank=15, CountLineCode=100, CountLineCodeDecl=45, CountLineCodeExe=90, CountLineComment=15, CountStmt=72, CountStmtDecl=39, CountStmtExe=68, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=10
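
The pairwise-index bookkeeping in `Swin2SRSelfAttention.__init__` above is compact but easy to misread. The following standalone sketch (a toy 2x2 window, and plain `torch.meshgrid` instead of the internal `meshgrid` helper) reproduces how the `relative_position_index` buffer is derived; it is an illustration with assumed sizes, not part of the dataset row.

```python
import torch

window_size = (2, 2)  # hypothetical toy window; the model uses e.g. (8, 8)

coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij"))    # (2, Wh, Ww)
coords_flatten = torch.flatten(coords, 1)                                  # (2, Wh*Ww)

# pairwise (dh, dw) offsets between every pair of positions inside the window
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # (2, N, N)
relative_coords = relative_coords.permute(1, 2, 0).contiguous()            # (N, N, 2)

# shift offsets to start at 0 and fold the 2-D offset into a single flat index
relative_coords[:, :, 0] += window_size[0] - 1
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = relative_coords.sum(-1)                          # (N, N)

print(relative_position_index)
# tensor([[4, 3, 1, 0],
#         [5, 4, 2, 1],
#         [7, 6, 4, 3],
#         [8, 7, 5, 4]])
```

Each entry is a row index into the MLP-generated `relative_position_bias_table` used in `forward`.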

id: 5,501
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
class_name: transformers.models.swin2sr.modeling_swin2sr.Swin2SRSelfOutput
human_written_code:
import torch
from torch import nn
class Swin2SRSelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class_skeleton:
class Swin2SRSelfOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
total_program_units=3, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=11, CountLineBlank=2, CountLineCode=9, CountLineCodeDecl=5, CountLineCodeExe=6, CountLineComment=0, CountStmt=9, CountStmtDecl=5, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 5,502
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
class_name: transformers.models.swin2sr.modeling_swin2sr.Swin2SRStage
human_written_code:
from torch import nn
from typing import Optional, Union
import torch
from ...modeling_layers import GradientCheckpointingLayer
class Swin2SRStage(GradientCheckpointingLayer):
"""
This corresponds to the Residual Swin Transformer Block (RSTB) in the original implementation.
"""
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, pretrained_window_size=0):
super().__init__()
self.config = config
self.dim = dim
self.layers = nn.ModuleList([Swin2SRLayer(config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, shift_size=0 if i % 2 == 0 else config.window_size // 2, pretrained_window_size=pretrained_window_size) for i in range(depth)])
if config.resi_connection == '1conv':
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
elif config.resi_connection == '3conv':
self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Conv2d(dim // 4, dim, 3, 1, 1))
self.patch_embed = Swin2SRPatchEmbeddings(config, normalize_patches=False)
self.patch_unembed = Swin2SRPatchUnEmbeddings(config)
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
residual = hidden_states
height, width = input_dimensions
for i, layer_module in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
output_dimensions = (height, width, height, width)
hidden_states = self.patch_unembed(hidden_states, input_dimensions)
hidden_states = self.conv(hidden_states)
hidden_states, _ = self.patch_embed(hidden_states)
hidden_states = hidden_states + residual
stage_outputs = (hidden_states, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
class_skeleton:
class Swin2SRStage(GradientCheckpointingLayer):
'''
This corresponds to the Residual Swin Transformer Block (RSTB) in the original implementation.
'''
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, pretrained_window_size=0):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
total_program_units=3, total_doc_str=1, AvgCountLine=32, AvgCountLineBlank=6, AvgCountLineCode=26, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.08, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=69, CountLineBlank=13, CountLineCode=52, CountLineCodeDecl=23, CountLineCodeExe=43, CountLineComment=4, CountStmt=27, CountStmtDecl=17, CountStmtExe=24, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=8
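
One detail worth calling out in `Swin2SRStage.__init__` above is the alternating `shift_size`: even-indexed blocks in a stage use regular windows, odd-indexed blocks use windows shifted by half the window size. A tiny standalone illustration with assumed toy values (a real model takes them from `config.window_size` and the stage depth):

```python
window_size, depth = 8, 6  # hypothetical values
shift_sizes = [0 if i % 2 == 0 else window_size // 2 for i in range(depth)]
print(shift_sizes)  # [0, 4, 0, 4, 0, 4]
```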

id: 5,503
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
class_name: transformers.models.swin2sr.modeling_swin2sr.Upsample
human_written_code:
from torch import nn
import math
class Upsample(nn.Module):
"""Upsample module.
Args:
scale (`int`):
Scale factor. Supported scales: 2^n and 3.
num_features (`int`):
Channel number of intermediate features.
"""
def __init__(self, scale, num_features):
super().__init__()
self.scale = scale
if scale & scale - 1 == 0:
for i in range(int(math.log(scale, 2))):
self.add_module(f'convolution_{i}', nn.Conv2d(num_features, 4 * num_features, 3, 1, 1))
self.add_module(f'pixelshuffle_{i}', nn.PixelShuffle(2))
elif scale == 3:
self.convolution = nn.Conv2d(num_features, 9 * num_features, 3, 1, 1)
self.pixelshuffle = nn.PixelShuffle(3)
else:
raise ValueError(f'Scale {scale} is not supported. Supported scales: 2^n and 3.')
def forward(self, hidden_state):
if self.scale & self.scale - 1 == 0:
for i in range(int(math.log(self.scale, 2))):
hidden_state = self.__getattr__(f'convolution_{i}')(hidden_state)
hidden_state = self.__getattr__(f'pixelshuffle_{i}')(hidden_state)
elif self.scale == 3:
hidden_state = self.convolution(hidden_state)
hidden_state = self.pixelshuffle(hidden_state)
return hidden_state
class_skeleton:
class Upsample(nn.Module):
'''Upsample module.
Args:
scale (`int`):
Scale factor. Supported scales: 2^n and 3.
num_features (`int`):
Channel number of intermediate features.
'''
def __init__(self, scale, num_features):
pass
def forward(self, hidden_state):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=13, AvgCountLineBlank=2, AvgCountLineCode=11, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.36, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=36, CountLineBlank=6, CountLineCode=22, CountLineCodeDecl=8, CountLineCodeExe=19, CountLineComment=8, CountStmt=19, CountStmtDecl=8, CountStmtExe=16, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=8
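
For a power-of-two scale, the `Upsample` class above stacks log2(scale) pairs of (3x3 convolution to 4x channels, `PixelShuffle(2)`), each pair doubling the spatial resolution. A self-contained shape check with assumed toy sizes:

```python
import math
import torch
from torch import nn

scale, num_features = 4, 8              # hypothetical values; the model derives them from its config
x = torch.randn(1, num_features, 16, 16)

stages = []
for _ in range(int(math.log(scale, 2))):
    # expand channels 4x, then rearrange them into 2x height and 2x width
    stages += [nn.Conv2d(num_features, 4 * num_features, 3, 1, 1), nn.PixelShuffle(2)]
upsample = nn.Sequential(*stages)

print(upsample(x).shape)                # torch.Size([1, 8, 64, 64]); 16 * 4 in each spatial dim
```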

id: 5,504
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
class_name: transformers.models.swin2sr.modeling_swin2sr.UpsampleOneStep
human_written_code:
from torch import nn
class UpsampleOneStep(nn.Module):
"""UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
Used in lightweight SR to save parameters.
Args:
scale (int):
Scale factor. Supported scales: 2^n and 3.
in_channels (int):
Channel number of intermediate features.
out_channels (int):
Channel number of output features.
"""
def __init__(self, scale, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, scale ** 2 * out_channels, 3, 1, 1)
self.pixel_shuffle = nn.PixelShuffle(scale)
def forward(self, x):
x = self.conv(x)
x = self.pixel_shuffle(x)
return x
class_skeleton:
class UpsampleOneStep(nn.Module):
'''UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
Used in lightweight SR to save parameters.
Args:
scale (int):
Scale factor. Supported scales: 2^n and 3.
in_channels (int):
Channel number of intermediate features.
out_channels (int):
Channel number of output features.
'''
def __init__(self, scale, in_channels, out_channels):
pass
def forward(self, x):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=5, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=1.11, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=25, CountLineBlank=6, CountLineCode=9, CountLineCodeDecl=5, CountLineCodeExe=6, CountLineComment=10, CountStmt=9, CountStmtDecl=5, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
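
The lightweight variant above folds the whole upsampling into one convolution followed by a single `PixelShuffle(scale)`, which is exactly the difference from `Upsample` that its docstring mentions. A quick shape check with assumed toy sizes:

```python
import torch
from torch import nn

scale, in_channels, out_channels = 4, 8, 3   # hypothetical values
module = nn.Sequential(
    nn.Conv2d(in_channels, scale**2 * out_channels, 3, 1, 1),  # one conv to scale^2 * out_channels
    nn.PixelShuffle(scale),                                     # one shuffle to full resolution
)
print(module(torch.randn(1, in_channels, 16, 16)).shape)        # torch.Size([1, 3, 64, 64])
```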

id: 5,505
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/configuration_swinv2.py
class_name: transformers.models.swinv2.configuration_swinv2.Swinv2Config
human_written_code:
from ...configuration_utils import PretrainedConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
class Swinv2Config(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Swinv2Model`]. It is used to instantiate a Swin
Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2
[microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
pretrained_window_sizes (`list(int)`, *optional*, defaults to `[0, 0, 0, 0]`):
Size of windows during pretraining.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage.
Example:
```python
>>> from transformers import Swinv2Config, Swinv2Model
>>> # Initializing a Swinv2 microsoft/swinv2-tiny-patch4-window8-256 style configuration
>>> configuration = Swinv2Config()
>>> # Initializing a model (with random weights) from the microsoft/swinv2-tiny-patch4-window8-256 style configuration
>>> model = Swinv2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'swinv2'
attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, pretrained_window_sizes=[0, 0, 0, 0], mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.pretrained_window_sizes = pretrained_window_sizes
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
class_skeleton:
class Swinv2Config(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Swinv2Model`]. It is used to instantiate a Swin
Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2
[microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
pretrained_window_sizes (`list(int)`, *optional*, defaults to `[0, 0, 0, 0]`):
Size of windows during pretraining.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage.
Example:
```python
>>> from transformers import Swinv2Config, Swinv2Model
>>> # Initializing a Swinv2 microsoft/swinv2-tiny-patch4-window8-256 style configuration
>>> configuration = Swinv2Config()
>>> # Initializing a model (with random weights) from the microsoft/swinv2-tiny-patch4-window8-256 style configuration
>>> model = Swinv2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, pretrained_window_sizes=[0, 0, 0, 0], mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
pass
total_program_units=2, total_doc_str=1, AvgCountLine=52, AvgCountLineBlank=1, AvgCountLineCode=49, AvgCountLineComment=2, AvgCyclomatic=1, CommentToCodeRatio=1.2, CountClassBase=2, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=23, CountDeclMethod=1, CountDeclMethodAll=6, CountLine=132, CountLineBlank=11, CountLineCode=55, CountLineCodeDecl=49, CountLineCodeExe=30, CountLineComment=66, CountStmt=27, CountStmtDecl=26, CountStmtExe=25, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1
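
Besides storing the constructor arguments, `Swinv2Config.__init__` above derives a few attributes (`num_layers`, `stage_names`, `hidden_size`). A standalone sketch of that arithmetic for the default `depths=[2, 2, 6, 2]` and `embed_dim=96`, with no transformers import needed:

```python
embed_dim, depths = 96, [2, 2, 6, 2]

num_layers = len(depths)                                      # 4 stages
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_layers + 1)]
hidden_size = int(embed_dim * 2 ** (num_layers - 1))          # 96 * 8 = 768

print(num_layers, stage_names, hidden_size)
# 4 ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] 768
```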

id: 5,506
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2Attention
human_written_code:
from typing import Optional, Union
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from torch import Tensor, nn
import torch
import collections.abc
class Swinv2Attention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0):
super().__init__()
self.self = Swinv2SelfAttention(config=config, dim=dim, num_heads=num_heads, window_size=window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size))
self.output = Swinv2SelfOutput(config, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
class_skeleton:
class Swinv2Attention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
total_program_units=4, total_doc_str=0, AvgCountLine=14, AvgCountLineBlank=1, AvgCountLineCode=12, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.08, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=44, CountLineBlank=4, CountLineCode=38, CountLineCodeDecl=17, CountLineCodeExe=28, CountLineComment=3, CountStmt=22, CountStmtDecl=11, CountStmtExe=18, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=5
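
The `prune_heads` method above delegates the index math to `find_pruneable_heads_and_indices` and `prune_linear_layer` from `transformers.pytorch_utils`. As a rough, simplified sketch of the bookkeeping only (it ignores the already-pruned-heads history that the real helper accounts for), with assumed toy sizes:

```python
import torch

num_heads, head_size = 4, 8                 # hypothetical sizes
heads_to_prune = {1}

# keep-mask over the flattened (num_heads * head_size) projection dimension
mask = torch.ones(num_heads, head_size)
for head in heads_to_prune:
    mask[head] = 0
index = torch.arange(num_heads * head_size)[mask.flatten().bool()]

print(index.shape[0])                                   # 24 rows kept in query/key/value
print((num_heads - len(heads_to_prune)) * head_size)    # 24, the updated all_head_size
```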

id: 5,507
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2Backbone
human_written_code:
from ...modeling_outputs import BackboneOutput
from ...utils.backbone_utils import BackboneMixin
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch import Tensor, nn
@auto_docstring(custom_intro='\n Swinv2 backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class Swinv2Backbone(Swinv2PreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))]
self.embeddings = Swinv2Embeddings(config)
self.encoder = Swinv2Encoder(config, self.embeddings.patch_grid)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(self, pixel_values: Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
>>> model = AutoBackbone.from_pretrained(
... "microsoft/swinv2-tiny-patch4-window8-256", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 2048, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
embedding_output, input_dimensions = self.embeddings(pixel_values)
outputs = self.encoder(embedding_output, input_dimensions, head_mask=None, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, return_dict=return_dict)
hidden_states = outputs.reshaped_hidden_states if return_dict else outputs[-1]
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
feature_maps += (hidden_state,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs[1],)
if output_attentions:
output += (outputs[2],)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
class_skeleton:
@auto_docstring(custom_intro='\n Swinv2 backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class Swinv2Backbone(Swinv2PreTrainedModel, BackboneMixin):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
@auto_docstring
def forward(self, pixel_values: Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
>>> model = AutoBackbone.from_pretrained(
... "microsoft/swinv2-tiny-patch4-window8-256", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 2048, 7, 7]
```'''
pass
total_program_units=6, total_doc_str=1, AvgCountLine=28, AvgCountLineBlank=5, AvgCountLineCode=16, AvgCountLineComment=7, AvgCyclomatic=4, CommentToCodeRatio=0.41, CountClassBase=2, CountClassCoupled=9, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=3, CountDeclMethodAll=16, CountLine=88, CountLineBlank=16, CountLineCode=51, CountLineCodeDecl=20, CountLineCodeExe=39, CountLineComment=21, CountStmt=29, CountStmtDecl=13, CountStmtExe=25, MaxCyclomatic=11, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=13

id: 5,508
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2DropPath
human_written_code:
from typing import Optional, Union
import torch
from torch import Tensor, nn
class Swinv2DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
class_skeleton:
class Swinv2DropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
total_program_units=4, total_doc_str=1, AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=1, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=12, CountLineBlank=3, CountLineCode=8, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=1, CountStmt=8, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
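
`Swinv2DropPath.forward` above calls a module-level `drop_path` function that is not part of this record. For reference, stochastic depth is conventionally implemented roughly as below (this mirrors the widely used timm-style helper; treat it as a sketch rather than the exact function from the file):

```python
import torch

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Randomly zero out (and rescale) whole samples in the batch during training."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # one mask value per sample, broadcastable
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()                                # binarize to 0 / 1
    return input.div(keep_prob) * random_tensor
```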

id: 5,509
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2Embeddings
human_written_code:
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Optional, Union
import torch
class Swinv2Embeddings(nn.Module):
"""
Construct the patch and position embeddings. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = Swinv2PatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.Tensor]:
_, num_channels, height, width = pixel_values.shape
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
if self.position_embeddings is not None:
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return (embeddings, output_dimensions)
class_skeleton:
class Swinv2Embeddings(nn.Module):
'''
Construct the patch and position embeddings. Optionally, also the mask token.
'''
def __init__(self, config, use_mask_token=False):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.Tensor]:
pass
total_program_units=4, total_doc_str=2, AvgCountLine=27, AvgCountLineBlank=6, AvgCountLineCode=19, AvgCountLineComment=3, AvgCyclomatic=3, CommentToCodeRatio=0.23, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=8, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=90, CountLineBlank=20, CountLineCode=57, CountLineCodeDecl=31, CountLineCodeExe=48, CountLineComment=13, CountStmt=45, CountStmtDecl=26, CountStmtExe=41, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=9
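
`interpolate_pos_encoding` above reshapes the learned position embeddings back onto their training-time patch grid, resizes that grid bicubically, and flattens it again. A toy standalone version of the same steps with assumed sizes (a 4x4 grid interpolated to 6x6):

```python
import torch
from torch import nn

dim, old_grid, new_grid = 32, 4, 6                     # hypothetical toy sizes
position_embeddings = torch.randn(1, 1 + old_grid * old_grid, dim)

class_pos_embed = position_embeddings[:, :1]           # the extra (class-like) token is kept as-is
patch_pos_embed = position_embeddings[:, 1:]
patch_pos_embed = patch_pos_embed.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
    patch_pos_embed, size=(new_grid, new_grid), mode="bicubic", align_corners=False
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).reshape(1, -1, dim)

print(torch.cat((class_pos_embed, patch_pos_embed), dim=1).shape)  # torch.Size([1, 37, 32])
```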

id: 5,510
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2Encoder
human_written_code:
from torch import Tensor, nn
import torch
from typing import Optional, Union
class Swinv2Encoder(nn.Module):
def __init__(self, config, grid_size, pretrained_window_sizes=(0, 0, 0, 0)):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
if self.config.pretrained_window_sizes is not None:
pretrained_window_sizes = config.pretrained_window_sizes
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')]
layers = []
for i_layer in range(self.num_layers):
stage = Swinv2Stage(config=config, dim=int(config.embed_dim * 2 ** i_layer), input_resolution=(grid_size[0] // 2 ** i_layer, grid_size[1] // 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=Swinv2PatchMerging if i_layer < self.num_layers - 1 else None, pretrained_window_size=pretrained_window_sizes[i_layer])
layers.append(stage)
self.layers = nn.ModuleList(layers)
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, Swinv2EncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = layer_outputs[1]
output_dimensions = layer_outputs[2]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
if output_hidden_states and output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states_before_downsampling.shape
reshaped_hidden_state = hidden_states_before_downsampling.view(batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and (not output_hidden_states_before_downsampling):
batch_size, _, hidden_size = hidden_states.shape
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[3:]
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions, all_reshaped_hidden_states] if v is not None))
return Swinv2EncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states)
class_skeleton:
class Swinv2Encoder(nn.Module):
def __init__(self, config, grid_size, pretrained_window_sizes=(0, 0, 0, 0)):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, Swinv2EncoderOutput]:
pass
total_program_units=3, total_doc_str=0, AvgCountLine=51, AvgCountLineBlank=6, AvgCountLineCode=43, AvgCountLineComment=2, AvgCyclomatic=8, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=103, CountLineBlank=12, CountLineCode=87, CountLineCodeDecl=30, CountLineCodeExe=75, CountLineComment=4, CountStmt=49, CountStmtDecl=21, CountStmtExe=46, MaxCyclomatic=12, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=16
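
The `dpr` list built in `Swinv2Encoder.__init__` above is the stochastic-depth schedule: drop rates grow linearly over all blocks of the network and each stage receives its own slice. A short standalone illustration for assumed default-style values `depths=[2, 2, 6, 2]` and `drop_path_rate=0.1`:

```python
import torch

depths, drop_path_rate = [2, 2, 6, 2], 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

for i, depth in enumerate(depths):
    start = sum(depths[:i])
    print(f"stage {i}: {[round(r, 3) for r in dpr[start:start + depth]]}")
# stage 0: [0.0, 0.009]
# stage 1: [0.018, 0.027]
# stage 2: [0.036, 0.045, 0.055, 0.064, 0.073, 0.082]
# stage 3: [0.091, 0.1]
```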

id: 5,511
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2EncoderOutput
human_written_code:
from dataclasses import dataclass
import torch
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro="\n Swinv2 encoder's outputs, with potential hidden states and attentions.\n ")
class Swinv2EncoderOutput(ModelOutput):
"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
class_skeleton:
@dataclass
@auto_docstring(custom_intro="\n Swinv2 encoder's outputs, with potential hidden states and attentions.\n ")
class Swinv2EncoderOutput(ModelOutput):
'''
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
total_program_units=3, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=4, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=30, CountLineBlank=5, CountLineCode=5, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=20, CountStmt=5, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0

id: 5,512
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2ForImageClassification
human_written_code:
import torch
from typing import Optional, Union
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
@auto_docstring(custom_intro="\n Swinv2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune SwinV2 on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class Swinv2ForImageClassification(Swinv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.swinv2 = Swinv2Model(config)
self.classifier = nn.Linear(self.swinv2.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Swinv2ImageClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.swinv2(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return Swinv2ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states)
class_skeleton:
@auto_docstring(custom_intro="\n Swinv2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune SwinV2 on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class Swinv2ForImageClassification(Swinv2PreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Swinv2ImageClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
total_program_units=5, total_doc_str=1, AvgCountLine=39, AvgCountLineBlank=5, AvgCountLineCode=30, AvgCountLineComment=4, AvgCyclomatic=7, CommentToCodeRatio=0.12, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=87, CountLineBlank=11, CountLineCode=68, CountLineCodeDecl=22, CountLineCodeExe=49, CountLineComment=8, CountStmt=32, CountStmtDecl=12, CountStmtExe=29, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=14
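
The forward pass above takes processor-prepared `pixel_values` and returns logits over `config.num_labels`. A hedged end-to-end usage sketch in the style of the other doc examples in this file (it assumes the `transformers`, `Pillow`, and `requests` packages plus network access; the checkpoint name and image URL are the ones already referenced in the docstrings above):

```python
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # predicted ImageNet class name
```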

id: 5,513
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2ForMaskedImageModeling
human_written_code:
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Optional, Union
import torch
import math
@auto_docstring(custom_intro='\n Swinv2 Model with a decoder on top for masked image modeling, as proposed in\n [SimMIM](https://huggingface.co/papers/2111.09886).\n\n <Tip>\n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n </Tip>\n ')
class Swinv2ForMaskedImageModeling(Swinv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.swinv2 = Swinv2Model(config, add_pooling_layer=False, use_mask_token=True)
num_features = int(config.embed_dim * 2 ** (config.num_layers - 1))
self.decoder = nn.Sequential(nn.Conv2d(in_channels=num_features, out_channels=config.encoder_stride ** 2 * config.num_channels, kernel_size=1), nn.PixelShuffle(config.encoder_stride))
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Swinv2MaskedImageModelingOutput]:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Examples:
```python
>>> from transformers import AutoImageProcessor, Swinv2ForMaskedImageModeling
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
>>> model = Swinv2ForMaskedImageModeling.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
>>> # create random boolean mask of shape (batch_size, num_patches)
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
>>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
>>> list(reconstructed_pixel_values.shape)
[1, 3, 256, 256]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.swinv2(pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = sequence_output.transpose(1, 2)
batch_size, num_channels, sequence_length = sequence_output.shape
height = width = math.floor(sequence_length ** 0.5)
sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
reconstructed_pixel_values = self.decoder(sequence_output)
masked_im_loss = None
if bool_masked_pos is not None:
size = self.config.image_size // self.config.patch_size
bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
mask = bool_masked_pos.repeat_interleave(self.config.patch_size, 1).repeat_interleave(self.config.patch_size, 2).unsqueeze(1).contiguous()
reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction='none')
masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-05) / self.config.num_channels
if not return_dict:
output = (reconstructed_pixel_values,) + outputs[2:]
return (masked_im_loss,) + output if masked_im_loss is not None else output
return Swinv2MaskedImageModelingOutput(loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states)
class_skeleton:
@auto_docstring(custom_intro='\n Swinv2 Model with a decoder on top for masked image modeling, as proposed in\n [SimMIM](https://huggingface.co/papers/2111.09886).\n\n <Tip>\n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n </Tip>\n ')
class Swinv2ForMaskedImageModeling(Swinv2PreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Swinv2MaskedImageModelingOutput]:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Examples:
```python
>>> from transformers import AutoImageProcessor, Swinv2ForMaskedImageModeling
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
>>> model = Swinv2ForMaskedImageModeling.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
>>> # create random boolean mask of shape (batch_size, num_patches)
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
>>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
>>> list(reconstructed_pixel_values.shape)
[1, 3, 256, 256]
```'''
pass
total_program_units=5, total_doc_str=1, AvgCountLine=50, AvgCountLineBlank=8, AvgCountLineCode=30, AvgCountLineComment=13, AvgCyclomatic=3, CommentToCodeRatio=0.42, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=104, CountLineBlank=16, CountLineCode=62, CountLineCodeDecl=26, CountLineCodeExe=48, CountLineComment=26, CountStmt=27, CountStmtDecl=16, CountStmtExe=24, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=6

id: 5,514
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2ImageClassifierOutput
human_written_code:
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
from dataclasses import dataclass
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Swinv2 outputs for image classification.\n ')
class Swinv2ImageClassifierOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
class_skeleton:
@dataclass
@auto_docstring(custom_intro='\n Swinv2 outputs for image classification.\n ')
class Swinv2ImageClassifierOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
total_program_units=3, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=3.67, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=33, CountLineBlank=5, CountLineCode=6, CountLineCodeDecl=6, CountLineCodeExe=5, CountLineComment=22, CountStmt=6, CountStmtDecl=6, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0

id: 5,515
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
class_name: transformers.models.swinv2.modeling_swinv2.Swinv2Intermediate
human_written_code:
from ...activations import ACT2FN
from torch import Tensor, nn
import torch
class Swinv2Intermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class Swinv2Intermediate(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 4 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3
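A minimal runnable sketch of the expansion step `Swinv2Intermediate` performs: `dim -> int(mlp_ratio * dim)` followed by the configured activation. Using `nn.GELU` directly is an assumption standing in for the `ACT2FN[config.hidden_act]` lookup, and the sizes are illustrative.

```python
import torch
from torch import nn

dim, mlp_ratio = 96, 4.0
dense = nn.Linear(dim, int(mlp_ratio * dim))  # 96 -> 384
act = nn.GELU()  # assumption: stands in for ACT2FN[config.hidden_act]

hidden_states = torch.randn(1, 49, dim)
print(act(dense(hidden_states)).shape)  # torch.Size([1, 49, 384])
```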
|
5,516
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2Layer
|
from typing import Optional, Union
import collections.abc
from torch import Tensor, nn
import torch
class Swinv2Layer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0):
super().__init__()
self.input_resolution = input_resolution
window_size, shift_size = self._compute_window_shift((config.window_size, config.window_size), (shift_size, shift_size))
self.window_size = window_size[0]
self.shift_size = shift_size[0]
self.attention = Swinv2Attention(config=config, dim=dim, num_heads=num_heads, window_size=self.window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size))
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.drop_path = Swinv2DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.intermediate = Swinv2Intermediate(config, dim)
self.output = Swinv2Output(config, dim)
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
def _compute_window_shift(self, target_window_size, target_shift_size) -> tuple[tuple[int, int], tuple[int, int]]:
window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)]
shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
return (window_size, shift_size)
def get_attn_mask(self, height, width, dtype):
if self.shift_size > 0:
img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
height_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
width_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return (hidden_states, pad_values)
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = hidden_states.view(batch_size, height, width, channels)
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
if attn_mask is not None:
attn_mask = attn_mask.to(hidden_states_windows.device)
attention_outputs = self.attention(hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = self.layernorm_before(attention_windows)
hidden_states = shortcut + self.drop_path(hidden_states)
layer_output = self.intermediate(hidden_states)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output))
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
|
class Swinv2Layer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0):
pass
def _compute_window_shift(self, target_window_size, target_shift_size) -> tuple[tuple[int, int], tuple[int, int]]:
pass
def get_attn_mask(self, height, width, dtype):
pass
def maybe_pad(self, hidden_states, height, width):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 6 | 0 | 24 | 2 | 20 | 1 | 3 | 0.05 | 1 | 12 | 4 | 0 | 5 | 9 | 5 | 15 | 123 | 15 | 103 | 52 | 89 | 5 | 72 | 44 | 66 | 6 | 1 | 3 | 17
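The mask built in `get_attn_mask` is the heart of shifted-window attention: after the cyclic roll, tokens from different image regions share a window, and cross-region pairs receive a large negative bias so softmax ignores them. A self-contained sketch, with a simplified `window_partition` standing in for the helper this record references but does not define:

```python
import torch

def window_partition(x, window_size):
    # (1, H, W, 1) -> (num_windows, window_size, window_size, 1)
    _, height, width, channels = x.shape
    x = x.view(1, height // window_size, window_size, width // window_size, window_size, channels)
    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, channels)

def build_shift_mask(height, width, window_size, shift_size):
    # Label the 9 regions produced by the cyclic shift, then mark
    # cross-region pairs inside every window with a large negative bias.
    img_mask = torch.zeros((1, height, width, 1))
    slices = (slice(0, -window_size), slice(-window_size, -shift_size), slice(-shift_size, None))
    count = 0
    for h in slices:
        for w in slices:
            img_mask[:, h, w, :] = count
            count += 1
    mask_windows = window_partition(img_mask, window_size).view(-1, window_size * window_size)
    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
    return attn_mask.masked_fill(attn_mask != 0, -100.0)

mask = build_shift_mask(8, 8, window_size=4, shift_size=2)
print(mask.shape)  # torch.Size([4, 16, 16]): one mask per window
```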
|
5,517
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2MaskedImageModelingOutput
|
import warnings
from dataclasses import dataclass
import torch
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Swinv2 masked image model outputs.\n ')
class Swinv2MaskedImageModelingOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Masked image modeling (MLM) loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
reconstruction: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@property
def logits(self):
warnings.warn('logits attribute is deprecated and will be removed in version 5 of Transformers. Please use the reconstruction attribute to retrieve the final output instead.', FutureWarning)
return self.reconstruction
|
@dataclass
@auto_docstring(custom_intro='\n Swinv2 masked image model outputs.\n ')
class Swinv2MaskedImageModelingOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Masked image modeling (MLM) loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
@property
def logits(self):
pass
| 5 | 1 | 7 | 0 | 7 | 0 | 1 | 1.57 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 42 | 6 | 14 | 8 | 11 | 22 | 9 | 7 | 7 | 1 | 1 | 0 | 1
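The deprecated `logits` property above follows a reusable pattern: keep the old attribute name alive as a property that warns and forwards to the new field. A minimal sketch of that pattern on a toy dataclass (the names here are hypothetical, not from the record):

```python
import warnings
from dataclasses import dataclass
from typing import Optional

@dataclass
class Output:
    reconstruction: Optional[float] = None

    @property
    def logits(self):
        # Old name still works, but nudges callers toward the new field.
        warnings.warn("logits is deprecated; use reconstruction instead.", FutureWarning)
        return self.reconstruction

out = Output(reconstruction=0.5)
print(out.logits)  # emits a FutureWarning, returns 0.5
```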
|
5,518
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2Model
|
import torch
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Optional, Union
from torch import Tensor, nn
@auto_docstring
class Swinv2Model(Swinv2PreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
"""
add_pooling_layer (`bool`, *optional*, defaults to `True`):
Whether or not to apply pooling layer.
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether or not to create and apply mask tokens in the embedding layer.
"""
super().__init__(config)
self.config = config
self.num_layers = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
self.embeddings = Swinv2Embeddings(config, use_mask_token=use_mask_token)
self.encoder = Swinv2Encoder(config, self.embeddings.patch_grid)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. `heads_to_prune`: dict of {layer_num: list of heads to prune in this layer}. See the
base class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Swinv2ModelOutput]:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, len(self.config.depths))
embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs = self.encoder(embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return Swinv2ModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states)
|
@auto_docstring
class Swinv2Model(Swinv2PreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
'''
add_pooling_layer (`bool`, *optional*, defaults to `True`):
Whether or not to apply pooling layer.
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether or not to create and apply mask tokens in the embedding layer.
'''
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. `heads_to_prune`: dict of {layer_num: list of heads to prune in this layer}. See the
base class `PreTrainedModel`.
'''
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Swinv2ModelOutput]:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
'''
pass
| 7 | 3 | 22 | 3 | 15 | 4 | 3 | 0.2 | 1 | 7 | 3 | 0 | 4 | 7 | 4 | 5 | 98 | 15 | 69 | 28 | 47 | 14 | 35 | 18 | 30 | 7 | 2 | 1 | 12
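The pooling step in `Swinv2Model.forward` averages the `(batch, seq_len, hidden)` encoder output over the sequence axis with `nn.AdaptiveAvgPool1d(1)`. A sketch with dummy tensors, assuming an illustrative hidden size of 768:

```python
import torch
from torch import nn

# Sequence output from the encoder: (batch, seq_len, hidden)
sequence_output = torch.randn(2, 49, 768)
pooler = nn.AdaptiveAvgPool1d(1)

# Pool over the sequence dimension, as in Swinv2Model.forward
pooled = pooler(sequence_output.transpose(1, 2))  # (2, 768, 1)
pooled = torch.flatten(pooled, 1)                 # (2, 768)
print(pooled.shape)
```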
|
5,519
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2ModelOutput
|
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Optional, Union
from dataclasses import dataclass
import torch
@dataclass
@auto_docstring(custom_intro="\n Swinv2 model's outputs that also contains a pooling of the last hidden states.\n ")
class Swinv2ModelOutput(ModelOutput):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Swinv2 model's outputs that also contains a pooling of the last hidden states.\n ")
class Swinv2ModelOutput(ModelOutput):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.67 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 5 | 6 | 6 | 5 | 22 | 6 | 6 | 5 | 0 | 1 | 0 | 0
|
5,520
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2Output
|
import torch
from torch import Tensor, nn
class Swinv2Output(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class Swinv2Output(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 10 | 1 | 9 | 5 | 6 | 0 | 9 | 5 | 6 | 1 | 1 | 0 | 2
|
5,521
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2PatchEmbeddings
|
import torch
import collections.abc
from torch import Tensor, nn
from typing import Optional, Union
class Swinv2PatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.embed_dim)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
_, num_channels, height, width = pixel_values.shape
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
return (embeddings, output_dimensions)
|
class Swinv2PatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def maybe_pad(self, pixel_values, height, width):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
pass
| 4 | 1 | 11 | 1 | 10 | 0 | 2 | 0.2 | 1 | 4 | 0 | 0 | 3 | 6 | 3 | 13 | 41 | 5 | 30 | 17 | 26 | 6 | 30 | 17 | 26 | 3 | 1 | 1 | 7
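`Swinv2PatchEmbeddings` turns an image into a token sequence with a single strided convolution, then flattens the spatial grid. A sketch with illustrative sizes (224x224 input, 4x4 patches, embed dim 96; these numbers are assumptions for the demo, not read from a config):

```python
import torch
from torch import nn

# One Conv2d with kernel == stride == patch size acts as the patchifier.
projection = nn.Conv2d(3, 96, kernel_size=4, stride=4)
pixel_values = torch.randn(1, 3, 224, 224)

embeddings = projection(pixel_values)               # (1, 96, 56, 56)
embeddings = embeddings.flatten(2).transpose(1, 2)  # (1, 3136, 96)
print(embeddings.shape)
```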
|
5,522
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2PatchMerging
|
from torch import Tensor, nn
import torch
class Swinv2PatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(2 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = height % 2 == 1 or width % 2 == 1
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
input_feature = self.maybe_pad(input_feature, height, width)
input_feature_0 = input_feature[:, 0::2, 0::2, :]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels)
input_feature = self.reduction(input_feature)
input_feature = self.norm(input_feature)
return input_feature
|
class Swinv2PatchMerging(nn.Module):
'''
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
'''
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
pass
def maybe_pad(self, input_feature, height, width):
pass
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
pass
| 4 | 1 | 12 | 1 | 9 | 3 | 1 | 0.67 | 1 | 3 | 0 | 0 | 3 | 4 | 3 | 13 | 52 | 8 | 27 | 16 | 23 | 18 | 27 | 16 | 23 | 2 | 1 | 1 | 4
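Patch merging halves each spatial dimension by concatenating every 2x2 neighborhood channel-wise and projecting `4*dim -> 2*dim`. A sketch of the shape bookkeeping with toy sizes:

```python
import torch
from torch import nn

# Merge each 2x2 neighborhood of patches: (B, H*W, C) -> (B, H/2 * W/2, 2C).
batch, height, width, channels = 1, 8, 8, 96
x = torch.randn(batch, height * width, channels).view(batch, height, width, channels)

x = torch.cat([x[:, 0::2, 0::2, :], x[:, 1::2, 0::2, :],
               x[:, 0::2, 1::2, :], x[:, 1::2, 1::2, :]], dim=-1)
x = x.view(batch, -1, 4 * channels)                 # (1, 16, 384)
reduction = nn.Linear(4 * channels, 2 * channels, bias=False)
print(reduction(x).shape)                           # torch.Size([1, 16, 192])
```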
|
5,523
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2PreTrainedModel
|
import math
from .configuration_swinv2 import Swinv2Config
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch import Tensor, nn
from ...modeling_utils import PreTrainedModel
@auto_docstring
class Swinv2PreTrainedModel(PreTrainedModel):
config: Swinv2Config
base_model_prefix = 'swinv2'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['Swinv2Stage']
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, Swinv2Embeddings):
if module.mask_token is not None:
module.mask_token.data.zero_()
if module.position_embeddings is not None:
module.position_embeddings.data.zero_()
elif isinstance(module, Swinv2SelfAttention):
module.logit_scale.data.fill_(math.log(10))
|
@auto_docstring
class Swinv2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 11 | 0 | 8 | 3 | 4 | 0.5 | 1 | 0 | 0 | 4 | 1 | 0 | 1 | 1 | 23 | 2 | 14 | 7 | 12 | 7 | 13 | 7 | 11 | 4 | 1 | 2 | 4
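`_init_weights` dispatches on module type. A standalone sketch of the same dispatch over a toy model (the std of 0.02 stands in for `config.initializer_range`, an assumption):

```python
import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
for module in model.modules():
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        module.weight.data.normal_(mean=0.0, std=0.02)  # assumed initializer_range
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
print(model[1].weight)  # LayerNorm weight: all ones after init
```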
|
5,524
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2SelfAttention
|
import torch
from torch import Tensor, nn
import collections.abc
from typing import Optional, Union
import math
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
class Swinv2SelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]):
super().__init__()
if dim % num_heads != 0:
raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})')
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
self.pretrained_window_size = pretrained_window_size
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
self.continuous_position_bias_mlp = nn.Sequential(nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False))
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.int64).float()
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.int64).float()
relative_coords_table = torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing='ij')).permute(1, 2, 0).contiguous().unsqueeze(0)
if pretrained_window_size[0] > 0:
relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1
relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1
elif window_size > 1:
relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
relative_coords_table *= 8
relative_coords_table = torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8)
relative_coords_table = relative_coords_table.to(next(self.continuous_position_bias_mlp.parameters()).dtype)
self.register_buffer('relative_coords_table', relative_coords_table, persistent=False)
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing='ij'))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer('relative_position_index', relative_position_index, persistent=False)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize(key_layer, dim=-1).transpose(-2, -1)
logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp()
attention_scores = attention_scores * logit_scale
relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view(-1, self.num_attention_heads)
relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim) + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
|
class Swinv2SelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3 | 0 | 41 | 4 | 33 | 5 | 3 | 0.15 | 1 | 6 | 0 | 0 | 3 | 11 | 3 | 13 | 127 | 15 | 100 | 45 | 90 | 15 | 72 | 39 | 68 | 5 | 1 | 1 | 10
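The scoring in `forward` is Swinv2's cosine attention: queries and keys are L2-normalized, so raw scores are cosine similarities, rescaled by a learned per-head temperature that is clamped at `log(1/0.01)` before exponentiation. A sketch with dummy tensors:

```python
import math
import torch
from torch import nn

num_heads, seq, head_dim = 4, 16, 32
q = torch.randn(1, num_heads, seq, head_dim)
k = torch.randn(1, num_heads, seq, head_dim)
logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads, 1, 1)))

# Cosine similarity instead of dot product / sqrt(d)
scores = nn.functional.normalize(q, dim=-1) @ nn.functional.normalize(k, dim=-1).transpose(-2, -1)
# Learned temperature, capped so attention cannot become arbitrarily sharp
scores = scores * torch.clamp(logit_scale, max=math.log(1.0 / 0.01)).exp()
print(scores.shape)  # torch.Size([1, 4, 16, 16])
```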
|
5,525
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2SelfOutput
|
from torch import Tensor, nn
import torch
class Swinv2SelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class Swinv2SelfOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 11 | 2 | 9 | 5 | 6 | 0 | 9 | 5 | 6 | 1 | 1 | 0 | 2
|
5,526
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swinv2/modeling_swinv2.py
|
transformers.models.swinv2.modeling_swinv2.Swinv2Stage
|
import torch
from typing import Optional, Union
from torch import Tensor, nn
from ...modeling_layers import GradientCheckpointingLayer
class Swinv2Stage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample, pretrained_window_size=0):
super().__init__()
self.config = config
self.dim = dim
blocks = []
for i in range(depth):
block = Swinv2Layer(config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if i % 2 == 0 else config.window_size // 2, pretrained_window_size=pretrained_window_size)
blocks.append(block)
self.blocks = nn.ModuleList(blocks)
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
height, width = input_dimensions
for i, layer_module in enumerate(self.blocks):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height_downsampled, width_downsampled = ((height + 1) // 2, (width + 1) // 2)
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
|
class Swinv2Stage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample, pretrained_window_size=0):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3 | 0 | 30 | 4 | 26 | 1 | 5 | 0.02 | 1 | 7 | 1 | 0 | 2 | 5 | 2 | 12 | 62 | 8 | 53 | 27 | 42 | 1 | 30 | 19 | 27 | 5 | 1 | 1 | 9
|
5,527
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/configuration_switch_transformers.py
|
transformers.models.switch_transformers.configuration_switch_transformers.SwitchTransformersConfig
|
from ...configuration_utils import PretrainedConfig
class SwitchTransformersConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`SwitchTransformersModel`]. It is used to
instantiate a SwitchTransformers model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the
SwitchTransformers [google/switch-base-8](https://huggingface.co/google/switch-base-8) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 32128):
Vocabulary size of the SwitchTransformers model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`SwitchTransformersModel`].
d_model (`int`, *optional*, defaults to 768):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
num_heads`.
d_ff (`int`, *optional*, defaults to 2048):
Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
expert_capacity (`int`, *optional*, defaults to 64):
Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
Transformer.
num_layers (`int`, *optional*, defaults to 12):
Number of dense hidden layers in the Transformer encoder layer.
num_sparse_encoder_layers (`int`, *optional*, defaults to 3):
Number of sparse (MoE) dense hidden layers in the Transformer encoder layer.
num_decoder_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_sparse_decoder_layers (`int`, *optional*, defaults to 3):
Number of sparse (MoE) dense hidden layers in the Transformer decoder layer.
num_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_experts (`int`, *optional*, defaults to 8):
Number of experts for each SwitchTransformer layer.
router_bias (`bool`, *optional*, defaults to `False`):
Whether to add a bias to the router.
router_jitter_noise (`float`, *optional*, defaults to 0.01):
Amount of noise to add to the router.
router_dtype (`str`, *optional*, defaults to `"float32"`):
The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
*selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961).
router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
Whether to ignore padding tokens when routing.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
router_z_loss_coef (`float`, *optional*, defaults to 0.001):
The z loss factor for the total loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
dense_act_fn (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. SwitchTransformersv1.1
uses the `"gated-gelu"` feed forward projection. Original SwitchTransformers uses `"relu"`.
add_router_probs (`bool`, *optional*, defaults to `False`):
Whether to output router probabilities to compute router auxiliary loss.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
"""
model_type = 'switch_transformers'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype='float32', router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, dense_act_fn='relu', is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_sparse_encoder_layers = num_sparse_encoder_layers
self.num_layers = num_layers
self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
self.num_sparse_decoder_layers = num_sparse_decoder_layers
if self.num_sparse_encoder_layers > 0:
self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
else:
self.encoder_sparse_step = self.num_layers
if self.num_sparse_decoder_layers > 0:
self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
self.decoder_sparse_step = self.num_decoder_layers
self.num_heads = num_heads
self.num_experts = num_experts
self.expert_capacity = expert_capacity
self.router_bias = router_bias
self.router_jitter_noise = router_jitter_noise
if router_dtype not in ['float32', 'float16', 'bfloat16']:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
self.router_dtype = router_dtype
self.router_ignore_padding_tokens = router_ignore_padding_tokens
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.use_cache = use_cache
self.add_router_probs = add_router_probs
self.router_z_loss_coef = router_z_loss_coef
self.router_aux_loss_coef = router_aux_loss_coef
self.dense_act_fn = dense_act_fn
super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
|
class SwitchTransformersConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`SwitchTransformersModel`]. It is used to
instantiate a SwitchTransformers model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the
SwitchTransformers [google/switch-base-8](https://huggingface.co/google/switch-base-8) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 32128):
Vocabulary size of the SwitchTransformers model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`SwitchTransformersModel`].
d_model (`int`, *optional*, defaults to 768):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
num_heads`.
d_ff (`int`, *optional*, defaults to 2048):
Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
expert_capacity (`int`, *optional*, defaults to 64):
Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
Transformer.
num_layers (`int`, *optional*, defaults to 12):
Number of dense hidden layers in the Transformer encoder layer.
num_sparse_encoder_layers (`int`, *optional*, defaults to 3):
Number of sparse (MoE) dense hidden layers in the Transformer encoder layer.
num_decoder_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_sparse_decoder_layers (`int`, *optional*, defaults to 3):
Number of sparse (MoE) dense hidden layers in the Transformer decoder layer.
num_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_experts (`int`, *optional*, defaults to 8):
Number of experts for each SwitchTransformer layer.
router_bias (`bool`, *optional*, defaults to `False`):
Whether to add a bias to the router.
router_jitter_noise (`float`, *optional*, defaults to 0.01):
Amount of noise to add to the router.
router_dtype (`str`, *optional*, defaults to `"float32"`):
The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
*selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961).
router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
Whether to ignore padding tokens when routing.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
router_z_loss_coef (`float`, *optional*, defaults to 0.001):
The z loss factor for the total loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
dense_act_fn (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. SwitchTransformersv1.1
uses the `"gated-gelu"` feed forward projection. Original SwitchTransformers uses `"relu"`.
add_router_probs (`bool`, *optional*, defaults to `False`):
Whether to output router probabilities to compute router auxiliary loss.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
'''
def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype='float32', router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, dense_act_fn='relu', is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
pass
| 2 | 1 | 86 | 9 | 75 | 5 | 5 | 0.89 | 1 | 2 | 0 | 0 | 1 | 27 | 1 | 1 | 159 | 13 | 79 | 63 | 46 | 70 | 39 | 32 | 37 | 5 | 1 | 1 | 5
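The sparse-step arithmetic in `__init__` decides how often a MoE block appears: with the defaults (12 layers, 3 sparse layers per stack), every fourth block is sparse. A few lines reproducing the computation:

```python
# Defaults from the signature above: 12 layers, 3 sparse layers per stack.
num_layers, num_sparse_encoder_layers = 12, 3
if num_sparse_encoder_layers > 0:
    encoder_sparse_step = num_layers // num_sparse_encoder_layers
else:
    encoder_sparse_step = num_layers
print(encoder_sparse_step)  # 4 -> a sparse MoE block every 4th layer
```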
|
5,528
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py
|
transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersAttention
|
from ...utils.deprecation import deprecate_kwarg
from .configuration_switch_transformers import SwitchTransformersConfig
import math
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch.nn as nn
import torch
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from typing import Optional, Union
class SwitchTransformersAttention(nn.Module):
def __init__(self, config: SwitchTransformersConfig, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads)
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
max_exact = num_buckets // 2
is_small = relative_position < max_exact
relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long)
relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1))
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
if cache_position is None:
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
else:
context_position = cache_position[:, None].to(device)
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position
relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance)
values = self.relative_attention_bias(relative_position_bucket)
values = values.permute([2, 0, 1]).unsqueeze(0)
return values
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
batch_size, seq_length = hidden_states.shape[:2]
is_cross_attention = key_value_states is not None
query_states = self.q(hidden_states)
query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
is_updated = False
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k(current_states)
value_states = self.v(current_states)
key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
scores = torch.matmul(query_states, key_states.transpose(3, 2))
if position_bias is None:
key_length = key_states.shape[-2]
real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
if not self.has_relative_attention_bias:
position_bias = torch.zeros((1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device, cache_position=cache_position)
position_bias = position_bias[:, :, -seq_length:, :]
if mask is not None:
causal_mask = mask[:, :, :, :key_states.shape[-2]]
position_bias = position_bias + causal_mask
if self.pruned_heads:
mask = torch.ones(position_bias.shape[1])
mask[list(self.pruned_heads)] = 0
position_bias_masked = position_bias[:, mask.bool()]
else:
position_bias_masked = position_bias
scores += position_bias_masked
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(batch_size, -1, self.inner_dim)
attn_output = self.o(attn_output)
outputs = (attn_output, position_bias)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
|
class SwitchTransformersAttention(nn.Module):
def __init__(self, config: SwitchTransformersConfig, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
pass
def prune_heads(self, heads):
pass
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
'''
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
'''
pass
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
'''Compute binned relative position bias'''
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None):
'''
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
'''
pass
| 8 | 3 | 44 | 5 | 32 | 8 | 5 | 0.26 | 1 | 5 | 1 | 0 | 4 | 17 | 5 | 15 | 226 | 28 | 160 | 67 | 136 | 42 | 113 | 49 | 107 | 16 | 1 | 3 | 26
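`_relative_position_bucket` keeps exact buckets for small offsets and log-spaced buckets out to `max_distance`; in the bidirectional case, half of the buckets encode the sign. A runnable copy of the function applied to a few sample offsets:

```python
import math
import torch

def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    # Same scheme as above: half the buckets for exact small offsets,
    # the rest log-spaced up to max_distance.
    relative_buckets = 0
    if bidirectional:
        num_buckets //= 2
        relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
        relative_position = torch.abs(relative_position)
    else:
        relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
    max_exact = num_buckets // 2
    is_small = relative_position < max_exact
    relative_position_if_large = max_exact + (
        torch.log(relative_position.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    relative_position_if_large = torch.min(
        relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
    )
    relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
    return relative_buckets

offsets = torch.tensor([-128, -16, -1, 0, 1, 16, 128])
print(relative_position_bucket(offsets))  # tensor([15, 10,  1,  0, 17, 26, 31])
```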
|
5,529
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py
|
transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersBlock
|
from ...modeling_layers import GradientCheckpointingLayer
import torch
from typing import Optional, Union
import torch.nn as nn
class SwitchTransformersBlock(GradientCheckpointingLayer):
def __init__(self, config, has_relative_attention_bias=False, is_sparse=False, layer_idx: Optional[int]=None):
super().__init__()
self.is_decoder = config.is_decoder
self.is_sparse = is_sparse
self.layer = nn.ModuleList()
self.layer.append(SwitchTransformersLayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx))
if self.is_decoder:
self.layer.append(SwitchTransformersLayerCrossAttention(config, layer_idx=layer_idx))
self.layer.append(SwitchTransformersLayerFF(config, is_sparse=self.is_sparse))
def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, output_router_logits=True, return_dict=True, cache_position=None):
self_attention_outputs = self.layer[0](hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = self_attention_outputs[0]
attention_outputs = self_attention_outputs[1:]
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
cross_attention_outputs = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = cross_attention_outputs[0]
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
attention_outputs = attention_outputs + cross_attention_outputs[1:]
hidden_states = self.layer[-1](hidden_states, output_router_logits)
if isinstance(hidden_states, tuple):
hidden_states, router_tuple = hidden_states
else:
router_tuple = (torch.zeros((1,), device=hidden_states.device, dtype=torch.int64),)
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
return outputs + attention_outputs + (router_tuple,)
|
class SwitchTransformersBlock(GradientCheckpointingLayer):
def __init__(self, config, has_relative_attention_bias=False, is_sparse=False, layer_idx: Optional[int]=None):
pass
def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, output_router_logits=True, return_dict=True, cache_position=None):
pass
total_program_units=3 | total_doc_str=0 | AvgCountLine=47 | AvgCountLineBlank=6 | AvgCountLineCode=39 | AvgCountLineComment=4 | AvgCyclomatic=5 | CommentToCodeRatio=0.09 | CountClassBase=1 | CountClassCoupled=6 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=96 | CountLineBlank=12 | CountLineCode=79 | CountLineCodeDecl=29 | CountLineCodeExe=60 | CountLineComment=7 | CountStmt=37 | CountStmtDecl=13 | CountStmtExe=34 | MaxCyclomatic=7 | MaxInheritanceTree=1 | MaxNesting=2 | SumCyclomatic=9
|
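Each sublayer in the block above re-applies the same fp16 guard: `torch.finfo(torch.float16).max` is 65504, so clamping to `max - 1000` pulls any overflowed (`inf`) activation back to a large finite value instead of letting it turn into `NaN` in the next matmul. A standalone illustration of the pattern:

```python
import torch

x = torch.tensor([1.0, float("inf"), -float("inf")], dtype=torch.float16)
if x.dtype == torch.float16 and torch.isinf(x).any():
    clamp_value = torch.finfo(x.dtype).max - 1000  # 65504 - 1000
    x = torch.clamp(x, min=-clamp_value, max=clamp_value)
# x now holds large finite values where the infs were
```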
5,530 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersDenseActDense
|
from .configuration_switch_transformers import SwitchTransformersConfig
from ...activations import ACT2FN
import torch
import torch.nn as nn
class SwitchTransformersDenseActDense(nn.Module):
def __init__(self, config: SwitchTransformersConfig):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
|
class SwitchTransformersDenseActDense(nn.Module):
def __init__(self, config: SwitchTransformersConfig):
pass
def forward(self, hidden_states):
pass
total_program_units=3 | total_doc_str=0 | AvgCountLine=9 | AvgCountLineBlank=0 | AvgCountLineCode=9 | AvgCountLineComment=0 | AvgCyclomatic=2 | CommentToCodeRatio=0 | CountClassBase=1 | CountClassCoupled=3 | CountClassCoupledModified=1 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=4 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=20 | CountLineBlank=1 | CountLineCode=19 | CountLineCodeDecl=7 | CountLineCodeExe=16 | CountLineComment=0 | CountStmt=15 | CountStmtDecl=7 | CountStmtExe=12 | MaxCyclomatic=2 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=3
|
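The dtype check in `forward` above exists for mixed-precision and quantized runs: activations are cast to the dtype of `wo`'s weight unless that weight is int8 (e.g. a quantized linear layer, whose storage dtype does not reflect the compute dtype). A hypothetical usage sketch; the config fields used here (`d_model`, `d_ff`, `dropout_rate`, `dense_act_fn`) are the ones the constructor reads:

```python
import torch
from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig(d_model=64, d_ff=128, dropout_rate=0.1, dense_act_fn="relu")
mlp = SwitchTransformersDenseActDense(config)  # the class defined above
out = mlp(torch.randn(2, 5, 64))               # (batch, seq_len, d_model) in and out
assert out.shape == (2, 5, 64)
```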
5,531 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersEncoderModel
|
from .configuration_switch_transformers import SwitchTransformersConfig
import torch.nn as nn
import copy
from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
import torch
from ...modeling_outputs import MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput
from typing import Optional, Union
@auto_docstring(custom_intro="\n The bare SWITCH_TRANSFORMERS Model transformer outputting encoder's raw hidden-states without any specific head\n ")
class SwitchTransformersEncoderModel(SwitchTransformersPreTrainedModel):
_tied_weights_keys = ['encoder.embed_tokens.weight']
def __init__(self, config: SwitchTransformersConfig):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = SwitchTransformersStack(encoder_config, self.shared)
self.post_init()
self.device_map = None
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=True, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], MoEModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position
embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
To know more on how to prepare `input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
Example:
```python
>>> from transformers import AutoTokenizer, SwitchTransformersEncoderModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
>>> model = SwitchTransformersEncoderModel.from_pretrained("google/switch-base-8")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict)
return encoder_outputs
|
@auto_docstring(custom_intro="\n The bare SWITCH_TRANSFORMERS Model transformer outputting encoder's raw hidden-states without any specific head\n ")
class SwitchTransformersEncoderModel(SwitchTransformersPreTrainedModel):
def __init__(self, config: SwitchTransformersConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _tie_weights(self):
pass
def get_encoder(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=True, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], MoEModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position
embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
To know more on how to prepare `input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
Example:
```python
>>> from transformers import AutoTokenizer, SwitchTransformersEncoderModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
>>> model = SwitchTransformersEncoderModel.from_pretrained("google/switch-base-8")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
total_program_units=10 | total_doc_str=2 | AvgCountLine=10 | AvgCountLineBlank=1 | AvgCountLineCode=6 | AvgCountLineComment=3 | AvgCyclomatic=1 | CommentToCodeRatio=0.39 | CountClassBase=1 | CountClassCoupled=5 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=7 | CountDeclInstanceVariable=3 | CountDeclMethod=7 | CountDeclMethodAll=10 | CountLine=83 | CountLineBlank=15 | CountLineCode=49 | CountLineCodeDecl=26 | CountLineCodeExe=29 | CountLineComment=19 | CountStmt=28 | CountStmtDecl=15 | CountStmtExe=20 | MaxCyclomatic=2 | MaxInheritanceTree=2 | MaxNesting=1 | SumCyclomatic=10
|
5,532 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration
|
import copy
import torch
from ...modeling_outputs import MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...generation import GenerationMixin
import warnings
from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
from torch.nn import CrossEntropyLoss
from .configuration_switch_transformers import SwitchTransformersConfig
import torch.nn as nn
@auto_docstring(custom_intro='\n SWITCH_TRANSFORMERS Model with a `language modeling` head on top.\n ')
class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight']
def __init__(self, config: SwitchTransformersConfig):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = SwitchTransformersStack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = SwitchTransformersStack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
self.router_z_loss_coef = config.router_z_loss_coef
self.router_aux_loss_coef = config.router_aux_loss_coef
self.post_init()
self.device_map = None
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
def get_encoder(self):
return self.encoder
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=True, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqMoEOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position
embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
SWITCH_TRANSFORMERS uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
>>> model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8")
>>> # training
>>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer(
... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> # . To, let’s say you have a dog. To summarize:
>>> # Since the model has been trained on MLM, this will output gibberish
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, MoEModelOutput)):
encoder_outputs = MoEModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None):
decoder_input_ids = self._shift_right(labels)
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position)
sequence_output = decoder_outputs[0]
if self.config.tie_word_embeddings:
sequence_output = sequence_output * self.model_dim ** (-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
encoder_z_loss = None
encoder_aux_loss = None
decoder_z_loss = None
decoder_aux_loss = None
if output_router_logits:
if self.encoder.config.encoder_sparse_step > 1:
encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_outputs[-1])
encoder_z_loss = router_z_loss_func(encoder_router_logits)
encoder_router_probs = nn.Softmax(dim=-1)(encoder_router_logits)
encoder_aux_loss = load_balancing_loss_func(encoder_router_probs, encoder_expert_indexes)
else:
encoder_z_loss = 0
encoder_aux_loss = 0
if self.decoder.config.decoder_sparse_step > 1:
decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_outputs[-1])
decoder_z_loss = router_z_loss_func(decoder_router_logits)
decoder_router_probs = nn.Softmax(dim=-1)(decoder_router_logits)
decoder_aux_loss = load_balancing_loss_func(decoder_router_probs, decoder_expert_indexes)
else:
decoder_z_loss = 0
decoder_aux_loss = 0
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
labels = labels.to(lm_logits.device)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
if output_router_logits:
z_loss = self.router_z_loss_coef * (encoder_z_loss + decoder_z_loss)
aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
loss = loss + z_loss + aux_loss
if not return_dict:
output = (lm_logits,)
if output_router_logits:
output += (encoder_z_loss, encoder_aux_loss, decoder_z_loss, decoder_aux_loss)
output += (*decoder_outputs[1:], *encoder_outputs)
return (loss,) + output if loss is not None else output
return Seq2SeqMoEOutput(loss=loss, logits=lm_logits, encoder_z_loss=encoder_z_loss, encoder_aux_loss=encoder_aux_loss, decoder_z_loss=decoder_z_loss, decoder_aux_loss=decoder_aux_loss, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_probs, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs)
def _unpack_router_logits(self, router_outputs):
total_router_logits = []
total_expert_indexes = []
for router_output in router_outputs:
if len(router_output[0].shape) > 1:
router_logits, expert_indexes = router_output
total_router_logits.append(router_logits)
total_expert_indexes.append(expert_indexes)
return (torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1))
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
|
@auto_docstring(custom_intro='\n SWITCH_TRANSFORMERS Model with a `language modeling` head on top.\n ')
class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedModel, GenerationMixin):
def __init__(self, config: SwitchTransformersConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _tie_weights(self):
pass
def get_encoder(self):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=True, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqMoEOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position
embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
SWITCH_TRANSFORMERS uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
>>> model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8")
>>> # training
>>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer(
... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> # . To, let’s say you have a dog. To summarize:
>>> # Since the model has been trained on MLM, this will output gibberish
```'''
pass
def _unpack_router_logits(self, router_outputs):
pass
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
pass
total_program_units=11 | total_doc_str=1 | AvgCountLine=22 | AvgCountLineBlank=3 | AvgCountLineCode=16 | AvgCountLineComment=3 | AvgCyclomatic=3 | CommentToCodeRatio=0.21 | CountClassBase=2 | CountClassCoupled=9 | CountClassCoupledModified=4 | CountClassDerived=0 | CountDeclInstanceMethod=12 | CountDeclInstanceVariable=8 | CountDeclMethod=12 | CountDeclMethodAll=15 | CountLine=281 | CountLineBlank=44 | CountLineCode=196 | CountLineCodeDecl=70 | CountLineCodeExe=161 | CountLineComment=41 | CountStmt=118 | CountStmtDecl=49 | CountStmtExe=105 | MaxCyclomatic=20 | MaxInheritanceTree=2 | MaxNesting=2 | SumCyclomatic=39
|
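`router_z_loss_func` and `load_balancing_loss_func` are used above but are module-level functions outside this class. Below are sketches of the two auxiliary losses from the Switch Transformers paper, under the shapes used here (`router_logits`: `(batch, seq_len, num_experts)`; `expert_indices`: `(batch, seq_len)`); treat them as illustrations of the math rather than the library's exact bodies:

```python
import torch

def router_z_loss_func(router_logits: torch.Tensor) -> torch.Tensor:
    # Penalizes large router logits: mean of logsumexp(logits)**2 over all tokens.
    num_groups, tokens_per_group, _ = router_logits.shape
    log_z = torch.logsumexp(router_logits, dim=-1)
    return torch.sum(log_z**2) / (num_groups * tokens_per_group)

def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> torch.Tensor:
    # Pushes routing toward uniform: (fraction of tokens per expert) x (mean router
    # prob per expert), scaled so a perfectly uniform router scores 1.0.
    num_experts = router_probs.shape[-1]
    expert_mask = torch.nn.functional.one_hot(expert_indices.to(torch.int64), num_experts).float()
    tokens_per_expert = expert_mask.mean(dim=-2)        # (batch, num_experts)
    router_prob_per_expert = router_probs.mean(dim=-2)  # (batch, num_experts)
    return torch.mean(tokens_per_expert * router_prob_per_expert) * num_experts**2
```

The combined objective is then `loss + router_z_loss_coef * (z losses) + router_aux_loss_coef * (aux losses)`, exactly as assembled in `forward` above.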
5,533 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersLayerCrossAttention
|
from ...utils.deprecation import deprecate_kwarg
from typing import Optional, Union
import torch.nn as nn
class SwitchTransformersLayerCrossAttention(nn.Module):
def __init__(self, config, layer_idx: Optional[int]=None):
super().__init__()
self.EncDecAttention = SwitchTransformersAttention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:]
return outputs
|
class SwitchTransformersLayerCrossAttention(nn.Module):
def __init__(self, config, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None):
pass
total_program_units=4 | total_doc_str=0 | AvgCountLine=18 | AvgCountLineBlank=0 | AvgCountLineCode=18 | AvgCountLineComment=1 | AvgCyclomatic=1 | CommentToCodeRatio=0.03 | CountClassBase=1 | CountClassCoupled=4 | CountClassCoupledModified=2 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=38 | CountLineBlank=1 | CountLineCode=37 | CountLineCodeDecl=22 | CountLineCodeExe=22 | CountLineComment=1 | CountStmt=12 | CountStmtDecl=10 | CountStmtExe=9 | MaxCyclomatic=1 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=2
|
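The wrapper above is the standard pre-LayerNorm residual: normalize first, run the sublayer, dropout its output, then add back the *unnormalized* input. The same shape recurs in the self-attention and feed-forward wrappers later in this file. A generic rendering of the pattern, with hypothetical names and an inner module assumed to return a plain tensor:

```python
import torch.nn as nn

class PreNormResidual(nn.Module):
    # Hypothetical generic wrapper mirroring the forward above.
    def __init__(self, norm: nn.Module, inner: nn.Module, dropout_rate: float):
        super().__init__()
        self.norm = norm
        self.inner = inner
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, **kwargs):
        return hidden_states + self.dropout(self.inner(self.norm(hidden_states), **kwargs))
```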
5,534 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersLayerFF
|
from .configuration_switch_transformers import SwitchTransformersConfig
import torch.nn as nn
class SwitchTransformersLayerFF(nn.Module):
"""
Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
Parameters:
config : ([`SwitchTransformersConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
is_sparse (`bool`):
Whether the MLP layer is a `Sparse` layer (contains a Mixture of Experts) or not
"""
def __init__(self, config: SwitchTransformersConfig, is_sparse=False):
super().__init__()
self.is_sparse = is_sparse
if not self.is_sparse:
self.mlp = SwitchTransformersDenseActDense(config)
else:
self.mlp = SwitchTransformersSparseMLP(config)
self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states, output_router_logits):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.mlp(forwarded_states)
if isinstance(forwarded_states, tuple):
forwarded_states, router_tuple = forwarded_states
else:
router_tuple = None
output = hidden_states + self.dropout(forwarded_states)
if output_router_logits and router_tuple is not None:
output = (output, router_tuple)
return output
|
class SwitchTransformersLayerFF(nn.Module):
'''
Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
Parameters:
config : ([`SwitchTransformersConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
is_sparse (`bool`):
Whether the MLP layer is a `Sparse` layer (contains a Mixture of Experts) or not
'''
def __init__(self, config: SwitchTransformersConfig, is_sparse=False):
pass
def forward(self, hidden_states, output_router_logits):
pass
total_program_units=3 | total_doc_str=1 | AvgCountLine=14 | AvgCountLineBlank=3 | AvgCountLineCode=10 | AvgCountLineComment=1 | AvgCyclomatic=3 | CommentToCodeRatio=0.48 | CountClassBase=1 | CountClassCoupled=6 | CountClassCoupledModified=4 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=4 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=40 | CountLineBlank=9 | CountLineCode=21 | CountLineCodeDecl=10 | CountLineCodeExe=18 | CountLineComment=10 | CountStmt=19 | CountStmtDecl=10 | CountStmtExe=16 | MaxCyclomatic=3 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=5
|
5,535 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersLayerNorm
|
import torch
import torch.nn as nn
class SwitchTransformersLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
Construct a layernorm module in the SwitchTransformers style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
|
class SwitchTransformersLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
Construct a layernorm module in the SwitchTransformers style. No bias and no subtraction of mean.
'''
pass
def forward(self, hidden_states):
pass
total_program_units=3 | total_doc_str=1 | AvgCountLine=11 | AvgCountLineBlank=2 | AvgCountLineCode=5 | AvgCountLineComment=4 | AvgCyclomatic=2 | CommentToCodeRatio=0.73 | CountClassBase=1 | CountClassCoupled=1 | CountClassCoupledModified=0 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=2 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=23 | CountLineBlank=4 | CountLineCode=11 | CountLineCodeDecl=6 | CountLineCodeExe=8 | CountLineComment=8 | CountStmt=11 | CountStmtDecl=6 | CountStmtExe=8 | MaxCyclomatic=2 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=3
|
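The norm above is the T5-style RMS norm, y = weight * x / sqrt(mean(x**2) + eps): no mean subtraction and no bias, with the variance computed in fp32 for stability before casting back to half precision. A worked check of what `forward` computes before the learned scale:

```python
import torch

x = torch.tensor([[3.0, 4.0]])
variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)  # (9 + 16) / 2 = 12.5
y = x * torch.rsqrt(variance + 1e-06)                          # ~[[0.8485, 1.1314]]
```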
5,536 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersLayerSelfAttention
|
from ...utils.deprecation import deprecate_kwarg
from typing import Optional, Union
import torch.nn as nn
class SwitchTransformersLayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
super().__init__()
self.SelfAttention = SwitchTransformersAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)
self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:]
return outputs
|
class SwitchTransformersLayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None):
pass
total_program_units=4 | total_doc_str=0 | AvgCountLine=16 | AvgCountLineBlank=0 | AvgCountLineCode=16 | AvgCountLineComment=1 | AvgCyclomatic=1 | CommentToCodeRatio=0.03 | CountClassBase=1 | CountClassCoupled=4 | CountClassCoupledModified=2 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=34 | CountLineBlank=1 | CountLineCode=33 | CountLineCodeDecl=19 | CountLineCodeExe=20 | CountLineComment=1 | CountStmt=12 | CountStmtDecl=9 | CountStmtExe=9 | MaxCyclomatic=1 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=2
|
5,537 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersModel
|
import warnings
from .configuration_switch_transformers import SwitchTransformersConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch.nn as nn
import copy
from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
import torch
from ...modeling_outputs import MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput
from typing import Optional, Union
@auto_docstring
class SwitchTransformersModel(SwitchTransformersPreTrainedModel):
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight']
def __init__(self, config: SwitchTransformersConfig):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = SwitchTransformersStack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
self.decoder = SwitchTransformersStack(decoder_config, self.shared)
self.post_init()
self.device_map = None
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqMoEModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position
embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
SWITCH_TRANSFORMERS uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Example:
```python
>>> from transformers import AutoTokenizer, SwitchTransformersModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
>>> model = SwitchTransformersModel.from_pretrained("google/switch-base-8")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # preprocess: Prepend decoder_input_ids with start token which is pad token for SwitchTransformersModel.
>>> # This is not needed for torch's SwitchTransformersForConditionalGeneration as it does this internally using labels arg.
>>> decoder_input_ids = model._shift_right(decoder_input_ids)
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
if output_router_logits and self.config.num_sparse_encoder_layers == 0 and (self.config.num_sparse_decoder_layers == 0):
raise ValueError('You asked to return `output_router_logits` but the model is dense and does not contain any sparse MLP layers. Set `output_router_logits = False` and restart')
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, MoEModelOutput)):
encoder_outputs = MoEModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None)
hidden_states = encoder_outputs[0]
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqMoEModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, decoder_router_logits=decoder_outputs.router_probs, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs)
|
@auto_docstring
class SwitchTransformersModel(SwitchTransformersPreTrainedModel):
def __init__(self, config: SwitchTransformersConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _tie_weights(self):
pass
def get_encoder(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqMoEModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. SWITCH_TRANSFORMERS is a model with relative position
embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
SWITCH_TRANSFORMERS uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [SWITCH_TRANSFORMERS
Training](./switch_transformers#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Example:
```python
>>> from transformers import AutoTokenizer, SwitchTransformersModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
>>> model = SwitchTransformersModel.from_pretrained("google/switch-base-8")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # preprocess: Prepend decoder_input_ids with start token which is pad token for SwitchTransformersModel.
>>> # This is not needed for torch's SwitchTransformersForConditionalGeneration as it does this internally using labels arg.
>>> decoder_input_ids = model._shift_right(decoder_input_ids)
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
total_program_units=10 | total_doc_str=2 | AvgCountLine=20 | AvgCountLineBlank=2 | AvgCountLineCode=14 | AvgCountLineComment=3 | AvgCyclomatic=3 | CommentToCodeRatio=0.23 | CountClassBase=1 | CountClassCoupled=9 | CountClassCoupledModified=4 | CountClassDerived=0 | CountDeclInstanceMethod=8 | CountDeclInstanceVariable=4 | CountDeclMethod=8 | CountDeclMethodAll=11 | CountLine=170 | CountLineBlank=24 | CountLineCode=119 | CountLineCodeDecl=39 | CountLineCodeExe=89 | CountLineComment=27 | CountStmt=50 | CountStmtDecl=19 | CountStmtExe=41 | MaxCyclomatic=12 | MaxInheritanceTree=2 | MaxNesting=2 | SumCyclomatic=21
|
5,538 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersPreTrainedModel
|
from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
from .configuration_switch_transformers import SwitchTransformersConfig
import torch.nn as nn
import torch
from ...modeling_utils import PreTrainedModel
@auto_docstring
class SwitchTransformersPreTrainedModel(PreTrainedModel):
config: SwitchTransformersConfig
base_model_prefix = 'switch_transformers'
supports_gradient_checkpointing = True
_can_compile_fullgraph = False
_no_split_modules = ['SwitchTransformersBlock']
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {'decoder_input_ids': input_ids, 'input_ids': input_ids, 'decoder_attention_mask': input_mask}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, SwitchTransformersLayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (SwitchTransformersModel, SwitchTransformersForConditionalGeneration, SwitchTransformersEncoderModel)):
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings):
module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, SwitchTransformersDenseActDense):
module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
if hasattr(module.wi, 'bias') and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5))
if hasattr(module.wo, 'bias') and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, SwitchTransformersAttention):
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5))
module.k.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
module.o.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
elif isinstance(module, SwitchTransformersSparseMLP):
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1)
for idx in range(self.config.num_experts):
module.experts[f'expert_{idx}'].wi.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
module.experts[f'expert_{idx}'].wo.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
if decoder_start_token_id is None:
raise ValueError('self.model.config.decoder_start_token_id has to be defined. In SwitchTransformers it is usually set to the pad_token_id. See SwitchTransformers docs for more information')
if is_torch_fx_proxy(input_ids):
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError('self.model.config.pad_token_id has to be defined.')
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
|
@auto_docstring
class SwitchTransformersPreTrainedModel(PreTrainedModel):
@property
def dummy_inputs(self):
pass
def _init_weights(self, module):
'''Initialize the weights'''
pass
def _shift_right(self, input_ids):
pass
total_program_units=6 | total_doc_str=1 | AvgCountLine=27 | AvgCountLineBlank=1 | AvgCountLineCode=21 | AvgCountLineComment=5 | AvgCyclomatic=5 | CommentToCodeRatio=0.25 | CountClassBase=1 | CountClassCoupled=9 | CountClassCoupledModified=7 | CountClassDerived=4 | CountDeclInstanceMethod=3 | CountDeclInstanceVariable=0 | CountDeclMethod=3 | CountDeclMethodAll=3 | CountLine=97 | CountLineBlank=8 | CountLineCode=72 | CountLineCodeDecl=22 | CountLineCodeExe=67 | CountLineComment=18 | CountStmt=56 | CountStmtDecl=21 | CountStmtExe=52 | MaxCyclomatic=11 | MaxInheritanceTree=1 | MaxNesting=2 | SumCyclomatic=16
|
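A concrete trace of `_shift_right` above, assuming `decoder_start_token_id = 0` and `pad_token_id = 0` (the values the released Switch checkpoints inherit from T5's convention):

```python
import torch

labels = torch.tensor([[37, -100, 42, 1]])
shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()  # [[0, 37, -100, 42]]
shifted[..., 0] = 0                          # prepend decoder_start_token_id
shifted.masked_fill_(shifted == -100, 0)     # [[0, 37, 0, 42]]: ignored labels become pad
```

The last target token is dropped by the shift, which is what makes the decoder predict token t from tokens < t.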
5,539 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersSparseMLP
|
import torch.nn as nn
import torch
from .configuration_switch_transformers import SwitchTransformersConfig
class SwitchTransformersSparseMLP(nn.Module):
"""
Implementation of the Switch Transformers Sparse MLP module.
"""
def __init__(self, config: SwitchTransformersConfig, expert_class: nn.Module=SwitchTransformersDenseActDense):
super().__init__()
self.router = SwitchTransformersTop1Router(config)
self.experts = nn.ModuleDict()
for idx in range(config.num_experts):
self.experts[f'expert_{idx}'] = expert_class(config)
def forward(self, hidden_states):
"""
Hold on, this will be slightly tricky to understand. In the correct order, a MoE layer does the following:
1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)`
and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the
hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor).
2- Dispatch the tokens to their associated experts. We do a classic for loop over the experts and assign to each
expert the corresponding hidden states.
"""
router_mask, router_probs, router_logits = self.router(hidden_states)
expert_index = torch.argmax(router_mask, dim=-1)
next_states = torch.zeros(hidden_states.shape, device=hidden_states.device, dtype=hidden_states.dtype)
router_mask = router_mask.bool()
batch_size, seq_len, num_experts = router_mask.shape
idx_mask = router_mask.reshape(batch_size * seq_len, num_experts).sum(dim=0)
idx_mask = torch.nonzero(idx_mask, as_tuple=True)[0].tolist()
for idx in idx_mask:
next_states[router_mask[:, :, idx]] = getattr(self.experts, f'expert_{idx}')(hidden_states[router_mask[:, :, idx]])
hidden_states = router_probs * next_states
return (hidden_states, (router_logits, expert_index))
|
class SwitchTransformersSparseMLP(nn.Module):
'''
Implementation of the Switch Transformers Sparse MLP module.
'''
def __init__(self, config: SwitchTransformersConfig, expert_class: nn.Module=SwitchTransformersDenseActDense):
pass
def forward(self, hidden_states):
'''
Hold on, this will be slightly tricky to understand. In the correct order, a MoE layer does the following:
1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)`
and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the
hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor).
2- Dispatch the tokens to their associated experts. We do a classic for loop over the experts and assign to each
expert the corresponding hidden states.
'''
pass
total_program_units=3 | total_doc_str=2 | AvgCountLine=22 | AvgCountLineBlank=4 | AvgCountLineCode=11 | AvgCountLineComment=7 | AvgCyclomatic=2 | CommentToCodeRatio=0.74 | CountClassBase=1 | CountClassCoupled=5 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=2 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=49 | CountLineBlank=10 | CountLineCode=23 | CountLineCodeDecl=12 | CountLineCodeExe=20 | CountLineComment=17 | CountStmt=19 | CountStmtDecl=12 | CountStmtExe=16 | MaxCyclomatic=2 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=4
|
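`SwitchTransformersTop1Router` is instantiated above but defined elsewhere in the file. A hypothetical minimal router with the same return contract (`router_mask`, `router_probs`, `router_logits`), omitting the real router's jitter noise, fp32 routing, and expert-capacity handling:

```python
import torch
import torch.nn as nn

class MinimalTop1Router(nn.Module):
    def __init__(self, d_model: int, num_experts: int):
        super().__init__()
        self.classifier = nn.Linear(d_model, num_experts, bias=False)

    def forward(self, hidden_states):
        router_logits = self.classifier(hidden_states)        # (batch, seq, num_experts)
        router_probs = torch.softmax(router_logits, dim=-1)
        expert_index = router_probs.argmax(dim=-1)            # top-1 expert per token
        router_mask = nn.functional.one_hot(expert_index, router_logits.shape[-1])
        top1_prob = router_probs.max(dim=-1, keepdim=True).values  # the scaling factor
        return router_mask, top1_prob, router_logits
```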
5,540 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py | transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersStack
|
from ...modeling_outputs import MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput
import torch.nn as nn
import torch
from ...modeling_attn_mask_utils import AttentionMaskConverter
from typing import Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
class SwitchTransformersStack(SwitchTransformersPreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
if embed_tokens is not None:
self.embed_tokens.weight = embed_tokens.weight
self.is_decoder = config.is_decoder
sparse_step = config.decoder_sparse_step if self.is_decoder else config.encoder_sparse_step
config.num_layers = config.num_decoder_layers if self.is_decoder else config.num_layers
self.block = nn.ModuleList()
for i in range(config.num_layers):
is_sparse = (i % sparse_step == 1 or sparse_step == 1) if sparse_step > 0 else False
self.block.append(SwitchTransformersBlock(config, has_relative_attention_bias=bool(i == 0), is_sparse=is_sparse, layer_idx=i))
self.final_layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.post_init()
self.device_map = None
self.gradient_checkpointing = False
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, output_router_logits=True, return_dict=None, cache_position=None):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = 'decoder_' if self.is_decoder else ''
raise ValueError(f'You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time')
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = 'decoder_' if self.is_decoder else ''
raise ValueError(f'You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds')
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if inputs_embeds is None:
if self.embed_tokens is None:
raise ValueError('You have to initialize the model with valid token embeddings')
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
if use_cache is True:
if not self.is_decoder:
raise ValueError(f'`use_cache` can only be set to `True` if {self} is used as a decoder')
if self.is_decoder:
if use_cache and past_key_values is None:
if self.config.is_encoder_decoder:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
else:
past_key_values = DynamicCache(config=self.config)
elif not self.is_decoder:
past_key_values = None
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)
if attention_mask is None and (not is_torchdynamo_compiling()):
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
if self.config.is_decoder:
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions)
else:
causal_mask = attention_mask[:, None, None, :]
causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_router_probs = () if output_router_logits else None
all_cross_attentions = () if output_attentions and self.is_decoder else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, layer_module in enumerate(self.block):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position)
router_probs = layer_outputs[-1]
layer_outputs = layer_outputs[:-1]
hidden_states = layer_outputs[0]
position_bias = layer_outputs[1]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[2],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[4],)
if output_router_logits:
all_router_probs = all_router_probs + (router_probs,)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions, all_router_probs] if v is not None))
return MoEModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, router_probs=all_router_probs)
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
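Below is a minimal sketch of the 4D mask this helper builds for a short sequence, assuming a plain 2D padding mask, batch size 1, and no static cache; all sizes and the padding pattern are made up.
```python
import torch

batch_size, sequence_length, target_length = 1, 4, 4
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

# Future positions (upper triangle above the diagonal) get the large negative value.
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()

# Merge in a 2D padding mask (here the last key position is padded out).
attention_mask = torch.tensor([[1, 1, 1, 0]], dtype=dtype)
padding_mask = (causal_mask + attention_mask[:, None, None, :]) == 0
causal_mask = causal_mask.masked_fill(padding_mask, min_dtype)
print(causal_mask.shape)  # torch.Size([1, 1, 4, 4])
```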
|
class SwitchTransformersStack(SwitchTransformersPreTrainedModel):
def __init__(self, config, embed_tokens=None):
pass
def set_input_embeddings(self, new_embeddings):
pass
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, output_router_logits=True, return_dict=None, cache_position=None):
pass
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
'''
pass
| 7
| 1
| 63
| 7
| 49
| 8
| 11
| 0.16
| 1
| 16
| 8
| 0
| 5
| 7
| 6
| 9
| 388
| 46
| 296
| 86
| 256
| 48
| 155
| 53
| 148
| 46
| 2
| 3
| 67
|
5,541
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/switch_transformers/modeling_switch_transformers.py
|
transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersTop1Router
|
from .configuration_switch_transformers import SwitchTransformersConfig
import torch
import torch.nn as nn
class SwitchTransformersTop1Router(nn.Module):
"""
Router implementing the "tokens choose top-1 expert" assignment.
This router uses the same mechanism as in Switch Transformer (https://huggingface.co/papers/2101.03961) and V-MoE
(https://huggingface.co/papers/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then
routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each
token is processed by an expert**, or that each expert receives at least one token.
"""
def __init__(self, config: SwitchTransformersConfig):
super().__init__()
self.num_experts = config.num_experts
self.expert_capacity = config.expert_capacity
self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
self.jitter_noise = config.router_jitter_noise
self.ignore_padding_tokens = config.router_ignore_padding_tokens
self.dtype = getattr(torch, config.router_dtype)
def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""
Computes router probabilities from input hidden states.
Args:
hidden_states (`torch.Tensor`):
(batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
Returns:
router_probabilities (`torch.Tensor`):
Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
token and expert. Used for routing tokens to experts.
router_logits (`torch.Tensor`):
Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
This is used later for computing router z-loss.
"""
self.input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(self.dtype)
if self.training and self.jitter_noise > 0:
hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
self._cast_classifier()
router_logits = self.classifier(hidden_states)
router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)
return (router_probabilities, router_logits)
def _cast_classifier(self):
"""
`bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore we need to check whether the
classifier is an instance of the `Linear8bitLt` class by checking for its special attributes.
"""
if not (hasattr(self.classifier, 'SCB') or hasattr(self.classifier, 'CB')):
self.classifier = self.classifier.to(self.dtype)
def forward(self, hidden_states: torch.Tensor) -> tuple:
"""
Generic forward function for every Router class. Each Router expects the same input hidden states
(`hidden_states`), corresponding to the hidden states for each token, and the `expert_capacity`, corresponding to
the number of tokens the Router will send to each expert; some Routers can send up to a few tokens to each expert.
Each Router works as follows: it expects the hidden states for each token and gets the `router_probs` and
`router_logits` from the `router_weights`. This assigns, for each token, the raw probability of being routed to
an expert. Each Router class then has to define its own `_compute_routing_instructions`.
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
Returns:
tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs
and the router logits. The router probabilities and logits are required to compute the loss.
"""
router_probs, router_logits = self._compute_router_probabilities(hidden_states)
expert_index = torch.argmax(router_probs, dim=-1)
expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)
token_priority = torch.cumsum(expert_index, dim=-2)
expert_capacity_mask = token_priority <= self.expert_capacity
expert_index = expert_index * expert_capacity_mask
router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)
return (expert_index, router_probs, router_logits)
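A standalone sketch of the routing computed above: a softmax over an ad-hoc classifier, a top-1 one-hot assignment, and the cumulative-sum capacity cut-off. The classifier, dimensions, and random inputs are illustrative assumptions.
```python
import torch
import torch.nn as nn

batch_size, seq_len, hidden_dim, num_experts, expert_capacity = 2, 6, 8, 4, 2
hidden_states = torch.randn(batch_size, seq_len, hidden_dim)
classifier = nn.Linear(hidden_dim, num_experts)  # stand-in for the router classifier

router_logits = classifier(hidden_states)
router_probs = torch.softmax(router_logits, dim=-1)

# Each token picks its single best expert (one-hot over the expert axis).
expert_index = torch.nn.functional.one_hot(router_probs.argmax(dim=-1), num_classes=num_experts)

# Count tokens per expert along the sequence; tokens past the capacity are dropped.
token_priority = torch.cumsum(expert_index, dim=-2)
expert_index = expert_index * (token_priority <= expert_capacity)

# Probability of the chosen expert, used later to scale the expert output.
top1_probs = router_probs.max(dim=-1).values.unsqueeze(-1)
print(expert_index.shape, top1_probs.shape)  # torch.Size([2, 6, 4]) torch.Size([2, 6, 1])
```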
|
class SwitchTransformersTop1Router(nn.Module):
'''
Router implementing the "tokens choose top-1 expert" assignment.
This router uses the same mechanism as in Switch Transformer (https://huggingface.co/papers/2101.03961) and V-MoE
(https://huggingface.co/papers/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then
routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each
token is processed by an expert**, or that each expert receives at least one token.
'''
def __init__(self, config: SwitchTransformersConfig):
pass
def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
'''
Computes router probabilities from input hidden states.
Args:
hidden_states (`torch.Tensor`):
(batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
Returns:
router_probabilities (`torch.Tensor`):
Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
token and expert. Used for routing tokens to experts.
router_logits (`torch.Tensor`):
Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
This is used later for computing router z-loss.
'''
pass
def _cast_classifier(self):
'''
`bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore we need to check whether the
classifier is an instance of the `Linear8bitLt` class by checking for its special attributes.
'''
pass
def forward(self, hidden_states: torch.Tensor) -> tuple:
'''
Generic forward function for every Router class. Each Router expects the same input hidden states
(`hidden_states`), corresponding to the hidden states for each token, and the `expert_capacity`, corresponding to
the number of tokens the Router will send to each expert; some Routers can send up to a few tokens to each expert.
Each Router works as follows: it expects the hidden states for each token and gets the `router_probs` and
`router_logits` from the `router_weights`. This assigns, for each token, the raw probability of being routed to
an expert. Each Router class then has to define its own `_compute_routing_instructions`.
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
Returns:
tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs
and the router logits. The router probabilities and logits are required to compute the loss.
'''
pass
| 5
| 4
| 19
| 2
| 7
| 10
| 2
| 1.53
| 1
| 3
| 1
| 0
| 4
| 7
| 4
| 14
| 91
| 15
| 30
| 18
| 25
| 46
| 30
| 18
| 25
| 2
| 1
| 1
| 6
|
5,542
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/configuration_t5.py
|
transformers.models.t5.configuration_t5.T5Config
|
from ...configuration_utils import PretrainedConfig
class T5Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`T5Model`] or a [`TFT5Model`]. It is used to
instantiate a T5 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the T5
[google-t5/t5-small](https://huggingface.co/google-t5/t5-small) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 32128):
Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`].
d_model (`int`, *optional*, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
be defined as `num_heads * d_kv`.
d_ff (`int`, *optional*, defaults to 2048):
Size of the intermediate feed forward layer in each `T5Block`.
num_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. T5v1.1 uses the
`"gated-gelu"` feed forward projection. Original T5 uses `"relu"`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
"""
model_type = 't5'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', 'head_dim': 'd_kv'}
def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, classifier_dropout=0.0, **kwargs):
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.classifier_dropout = classifier_dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split('-')
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == 'gated'
if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2:
raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. Please make sure `feed_forward_proj` is of the format `gated-{{ACT_FN}}` or `{{ACT_FN}}`, e.g. 'gated-gelu' or 'relu'")
if feed_forward_proj == 'gated-gelu':
self.dense_act_fn = 'gelu_new'
super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
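A short usage sketch: constructing a `T5Config` and checking the activation fields derived in `__init__` above. The chosen values are arbitrary.
```python
from transformers import T5Config

# "gated-gelu" is split into ("gated", "gelu"); the dense activation is then
# remapped to "gelu_new" and the gated feed-forward variant is enabled.
config = T5Config(d_model=512, d_ff=2048, num_layers=6, feed_forward_proj="gated-gelu")
print(config.is_gated_act)   # True
print(config.dense_act_fn)   # "gelu_new"
```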
|
class T5Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`T5Model`] or a [`TFT5Model`]. It is used to
instantiate a T5 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the T5
[google-t5/t5-small](https://huggingface.co/google-t5/t5-small) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 32128):
Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`T5Model`] or [`TFT5Model`].
d_model (`int`, *optional*, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
be defined as `num_heads * d_kv`.
d_ff (`int`, *optional*, defaults to 2048):
Size of the intermediate feed forward layer in each `T5Block`.
num_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. T5v1.1 uses the
`"gated-gelu"` feed forward projection. Original T5 uses `"relu"`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
'''
def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, classifier_dropout=0.0, **kwargs):
pass
| 2
| 1
| 61
| 4
| 56
| 2
| 4
| 0.69
| 1
| 2
| 0
| 0
| 1
| 17
| 1
| 1
| 117
| 8
| 65
| 44
| 42
| 45
| 28
| 23
| 26
| 4
| 1
| 1
| 4
|
5,543
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/configuration_t5.py
|
transformers.models.t5.configuration_t5.T5OnnxConfig
|
from collections.abc import Mapping
from ...onnx import OnnxSeq2SeqConfigWithPast
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = {'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}}
if self.use_past:
common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
common_inputs['decoder_input_ids'] = {0: 'batch'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction='inputs')
return common_inputs
@property
def default_onnx_opset(self) -> int:
return 13
|
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def default_onnx_opset(self) -> int:
pass
| 5
| 0
| 10
| 1
| 9
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 2
| 0
| 2
| 2
| 23
| 3
| 20
| 6
| 15
| 0
| 14
| 4
| 11
| 3
| 1
| 1
| 4
|
5,544
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5Attention
|
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils.deprecation import deprecate_kwarg
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from typing import Optional, Union
from .configuration_t5 import T5Config
import math
from torch import nn
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads)
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.key_value_proj_dim * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
max_exact = num_buckets // 2
is_small = relative_position < max_exact
relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long)
relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1))
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
if cache_position is None:
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
else:
context_position = cache_position[:, None].to(device)
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position
relative_position_bucket = self._relative_position_bucket(relative_position, bidirectional=not self.is_decoder, num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance)
values = self.relative_attention_bias(relative_position_bucket)
values = values.permute([2, 0, 1]).unsqueeze(0)
return values
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
batch_size, seq_length = hidden_states.shape[:2]
is_cross_attention = key_value_states is not None
query_states = self.q(hidden_states)
query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
is_updated = False
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k(current_states)
value_states = self.v(current_states)
key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
scores = torch.matmul(query_states, key_states.transpose(3, 2))
if position_bias is None:
key_length = key_states.shape[-2]
real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
if not self.has_relative_attention_bias:
position_bias = torch.zeros((1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device, cache_position=cache_position)
position_bias = position_bias[:, :, -seq_length:, :]
if mask is not None:
causal_mask = mask[:, :, :, :key_states.shape[-2]]
position_bias = position_bias + causal_mask
if self.pruned_heads:
mask = torch.ones(position_bias.shape[1])
mask[list(self.pruned_heads)] = 0
position_bias_masked = position_bias[:, mask.bool()]
else:
position_bias_masked = position_bias
scores += position_bias_masked
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(batch_size, -1, self.inner_dim)
attn_output = self.o(attn_output)
outputs = (attn_output, position_bias)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
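Since `_relative_position_bucket` is a static method, it can be called directly to see the bucketing behaviour its docstring describes: nearby offsets get their own buckets while distant ones share logarithmically spaced buckets. A small sketch (the offset range is arbitrary; the import path follows this record's file path):
```python
import torch
from transformers.models.t5.modeling_t5 import T5Attention

# relative position = memory_position - query_position
relative_position = torch.arange(-10, 11).unsqueeze(0)
buckets = T5Attention._relative_position_bucket(
    relative_position, bidirectional=True, num_buckets=32, max_distance=128
)
print(buckets)  # int64 tensor with values in [0, 32)
```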
|
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
pass
def prune_heads(self, heads):
pass
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
'''
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
'''
pass
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
'''Compute binned relative position bias'''
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None):
'''
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
'''
pass
| 8
| 3
| 44
| 5
| 32
| 8
| 5
| 0.26
| 1
| 5
| 1
| 0
| 4
| 17
| 5
| 15
| 226
| 28
| 160
| 67
| 136
| 42
| 113
| 49
| 107
| 16
| 1
| 3
| 26
|
5,545
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5Block
|
import torch
from ...utils.deprecation import deprecate_kwarg
from typing import Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
class T5Block(GradientCheckpointingLayer):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx))
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config, layer_idx=layer_idx))
self.layer.append(T5LayerFF(config))
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None):
self_attention_outputs = self.layer[0](hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = self_attention_outputs[0]
attention_outputs = self_attention_outputs[1:]
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
cross_attention_outputs = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions)
hidden_states = cross_attention_outputs[0]
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
attention_outputs = attention_outputs + cross_attention_outputs[1:]
hidden_states = self.layer[-1](hidden_states)
if hidden_states.dtype == torch.float16:
clamp_value = torch.where(torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
return outputs + attention_outputs
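The repeated `float16` guard above clamps activations just below the dtype maximum whenever any value has overflowed to `inf`. A minimal sketch of the same guard in isolation (the input tensor is made up):
```python
import torch

hidden_states = torch.tensor([1.0, float("inf"), -2.0], dtype=torch.float16)
clamp_value = torch.where(
    torch.isinf(hidden_states).any(),
    torch.finfo(hidden_states.dtype).max - 1000,
    torch.finfo(hidden_states.dtype).max,
)
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
print(hidden_states)  # the inf is replaced by a large finite float16 value
```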
|
class T5Block(GradientCheckpointingLayer):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None):
pass
| 4
| 0
| 48
| 5
| 41
| 4
| 4
| 0.09
| 1
| 5
| 3
| 0
| 2
| 2
| 2
| 12
| 98
| 11
| 82
| 26
| 64
| 7
| 33
| 11
| 30
| 6
| 1
| 2
| 8
|
5,546
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5ClassificationHead
|
import torch
from torch import nn
from .configuration_t5 import T5Config
class T5ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config: T5Config):
super().__init__()
self.dense = nn.Linear(config.d_model, config.d_model)
self.dropout = nn.Dropout(p=config.classifier_dropout)
self.out_proj = nn.Linear(config.d_model, config.num_labels)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
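A functional sketch of the same dropout → dense → tanh → dropout → projection pipeline, without the module wrapper; `d_model`, `num_labels`, and the random input are illustrative.
```python
import torch
import torch.nn as nn

d_model, num_labels = 8, 3
dense = nn.Linear(d_model, d_model)
dropout = nn.Dropout(p=0.1)
out_proj = nn.Linear(d_model, num_labels)

hidden_states = torch.randn(2, d_model)  # e.g. one pooled representation per example
logits = out_proj(dropout(torch.tanh(dense(dropout(hidden_states)))))
print(logits.shape)  # torch.Size([2, 3])
```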
|
class T5ClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config: T5Config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 6
| 0
| 6
| 0
| 1
| 0.08
| 1
| 3
| 1
| 0
| 2
| 3
| 2
| 12
| 16
| 2
| 13
| 6
| 10
| 1
| 13
| 6
| 10
| 1
| 1
| 0
| 2
|
5,547
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5DenseActDense
|
import torch
from torch import nn
from .configuration_t5 import T5Config
from ...activations import ACT2FN
class T5DenseActDense(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
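The dtype check before `self.wo` lets the activations follow the projection weight when that weight is kept in a lower precision, while int8-quantized weights are left alone. A small sketch of that guard; the bfloat16 layer is an illustrative stand-in.
```python
import torch
import torch.nn as nn

wo = nn.Linear(16, 8, bias=False).to(torch.bfloat16)  # stand-in for a low-precision weight
hidden_states = torch.randn(2, 16)                    # float32 activations

if (
    isinstance(wo.weight, torch.Tensor)
    and hidden_states.dtype != wo.weight.dtype
    and wo.weight.dtype != torch.int8
):
    hidden_states = hidden_states.to(wo.weight.dtype)

print(wo(hidden_states).dtype)  # torch.bfloat16
```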
|
class T5DenseActDense(nn.Module):
def __init__(self, config: T5Config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 9
| 0
| 9
| 0
| 2
| 0
| 1
| 3
| 1
| 0
| 2
| 4
| 2
| 12
| 20
| 1
| 19
| 7
| 16
| 0
| 15
| 7
| 12
| 2
| 1
| 1
| 3
|
5,548
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5DenseGatedActDense
|
import torch
from torch import nn
from .configuration_t5 import T5Config
from ...activations import ACT2FN
class T5DenseGatedActDense(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
if isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and (self.wo.weight.dtype != torch.int8):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
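The gated variant differs from `T5DenseActDense` only in the activation-gated product of two parallel projections. A standalone sketch of that product; sizes are illustrative.
```python
import torch
import torch.nn as nn

d_model, d_ff = 8, 16
wi_0 = nn.Linear(d_model, d_ff, bias=False)  # branch passed through the activation
wi_1 = nn.Linear(d_model, d_ff, bias=False)  # parallel linear branch
wo = nn.Linear(d_ff, d_model, bias=False)

x = torch.randn(2, 3, d_model)
hidden_states = nn.functional.gelu(wi_0(x)) * wi_1(x)  # activation-gated product
print(wo(hidden_states).shape)  # torch.Size([2, 3, 8])
```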
|
class T5DenseGatedActDense(nn.Module):
def __init__(self, config: T5Config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 13
| 1
| 10
| 2
| 2
| 0.14
| 1
| 3
| 1
| 0
| 2
| 5
| 2
| 12
| 27
| 3
| 21
| 10
| 18
| 3
| 17
| 10
| 14
| 2
| 1
| 1
| 3
|
5,549
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5EncoderModel
|
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
import warnings
from .configuration_t5 import T5Config
from torch import nn
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
@auto_docstring
class T5EncoderModel(T5PreTrainedModel):
_tied_weights_keys = ['encoder.embed_tokens.weight']
_keys_to_ignore_on_load_unexpected = ['decoder']
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = config
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
self.post_init()
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
warnings.warn("`T5EncoderModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0, 'block.1': 1, ...}", FutureWarning)
self.device_map = get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning)
self.encoder.deparallelize()
self.encoder = self.encoder.to('cpu')
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
base class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
To know more on how to prepare `input_ids` for pretraining, take a look at [T5 Training](./t5#training).
Example:
```python
>>> from transformers import AutoTokenizer, T5EncoderModel
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = T5EncoderModel.from_pretrained("google-t5/t5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
return encoder_outputs
|
@auto_docstring
class T5EncoderModel(T5PreTrainedModel):
def __init__(self, config: T5Config):
pass
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
pass
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _tie_weights(self):
pass
def get_encoder(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
base class `PreTrainedModel`.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
To know more on how to prepare `input_ids` for pretraining, take a look at [T5 Training](./t5#training).
Example:
```python
>>> from transformers import AutoTokenizer, T5EncoderModel
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = T5EncoderModel.from_pretrained("google-t5/t5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
| 14
| 2
| 11
| 1
| 8
| 2
| 1
| 0.25
| 1
| 7
| 3
| 0
| 9
| 4
| 9
| 12
| 113
| 17
| 77
| 31
| 54
| 19
| 43
| 19
| 33
| 2
| 2
| 1
| 13
|
5,550
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5ForConditionalGeneration
|
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
import warnings
from .configuration_t5 import T5Config
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...generation import GenerationMixin
from torch import nn
import copy
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
@auto_docstring(custom_intro='\n T5 Model with a `language modeling` head on top.\n ')
class T5ForConditionalGeneration(T5PreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight']
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight']
def __init__(self, config: T5Config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
self.post_init()
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
warnings.warn("`T5ForConditionalGeneration.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0': 0, 'encoder.block.1': 1, ...}", FutureWarning)
self.device_map = get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.decoder.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning)
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to('cpu')
self.decoder = self.decoder.to('cpu')
self.lm_head = self.lm_head.to('cpu')
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
def get_encoder(self):
return self.encoder
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
labels in `[0, ..., config.vocab_size - 1]`
Examples:
```python
>>> from transformers import AutoTokenizer, T5ForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small")
>>> # training
>>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer(
... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None):
decoder_input_ids = self._shift_right(labels)
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
sequence_output = decoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.encoder.first_device)
self.lm_head = self.lm_head.to(self.encoder.first_device)
sequence_output = sequence_output.to(self.lm_head.weight.device)
if self.config.tie_word_embeddings:
sequence_output = sequence_output * self.model_dim ** (-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
labels = labels.to(lm_logits.device)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return (loss,) + output if loss is not None else output
return Seq2SeqLMOutput(loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
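# Minimal usage sketch (assumes the "google-t5/t5-small" checkpoint is reachable):
# `prepare_decoder_input_ids_from_labels` applies the same right-shift that `forward`
# performs internally when only `labels` is passed.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small")
labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
decoder_input_ids = model.prepare_decoder_input_ids_from_labels(labels)
# The shifted sequence starts with the decoder start token (the pad token for T5).
assert decoder_input_ids[0, 0].item() == model.config.decoder_start_token_id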
|
@auto_docstring(custom_intro='\n T5 Model with a `language modeling` head on top.\n ')
class T5ForConditionalGeneration(T5PreTrainedModel, GenerationMixin):
def __init__(self, config: T5Config):
pass
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
pass
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _tie_weights(self):
pass
def get_encoder(self):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
labels in `[0, ..., config.vocab_size - 1]`
Examples:
```python
>>> from transformers import AutoTokenizer, T5ForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small")
>>> # training
>>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer(
... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
```'''
pass
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
pass
| 14 | 1 | 20 | 2 | 14 | 3 | 3 | 0.22 | 2 | 10 | 4 | 0 | 13 | 7 | 13 | 16 | 282 | 42 | 197 | 58 | 160 | 43 | 116 | 36 | 102 | 20 | 2 | 2 | 39 |
5,551 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py | transformers.models.t5.modeling_t5.T5ForQuestionAnswering |
import copy
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
import warnings
from .configuration_t5 import T5Config
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
@auto_docstring
class T5ForQuestionAnswering(T5PreTrainedModel):
_keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight']
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight']
def __init__(self, config: T5Config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.num_labels = config.num_labels
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
self.model_parallel = False
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
def get_encoder(self):
return self.encoder
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if start_positions is not None and end_positions is not None:
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
if input_ids is None:
raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.')
decoder_input_ids = self._shift_right(input_ids)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
hidden_states = encoder_outputs[0]
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=None, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = decoder_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1).to(start_logits.device)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1).to(end_logits.device)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs
return (total_loss,) + output if total_loss is not None else output
return Seq2SeqQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
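# Minimal usage sketch (assumes the "google-t5/t5-small" checkpoint is reachable; the QA
# head is newly initialized for that checkpoint): start/end logits are computed over the
# decoder output, and when no decoder inputs are given, `forward` builds them by
# right-shifting `input_ids`.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = T5ForQuestionAnswering.from_pretrained("google-t5/t5-small")
inputs = tokenizer("question: who walks in the park? context: the dog walks in the park", return_tensors="pt")
outputs = model(**inputs, start_positions=torch.tensor([8]), end_positions=torch.tensor([9]))
# Positions outside the sequence would be clamped to `ignored_index` and excluded from the loss.
print(outputs.loss, outputs.start_logits.shape)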
|
@auto_docstring
class T5ForQuestionAnswering(T5PreTrainedModel):
def __init__(self, config: T5Config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _tie_weights(self):
pass
def get_encoder(self):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
'''
pass
| 9 | 1 | 24 | 3 | 19 | 3 | 4 | 0.14 | 1 | 9 | 4 | 0 | 7 | 7 | 7 | 10 | 183 | 25 | 138 | 49 | 110 | 20 | 77 | 30 | 69 | 19 | 2 | 2 | 26 |
5,552 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py | transformers.models.t5.modeling_t5.T5ForSequenceClassification |
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
from .configuration_t5 import T5Config
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
@auto_docstring(custom_intro='\n T5 model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ')
class T5ForSequenceClassification(T5PreTrainedModel):
_keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight']
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight']
def __init__(self, config: T5Config):
super().__init__(config)
self.transformer = T5Model(config)
self.classification_head = T5ClassificationHead(config)
self.post_init()
self.model_parallel = False
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}')
if decoder_input_ids is None and decoder_inputs_embeds is None:
if input_ids is None:
raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.')
decoder_input_ids = self._shift_right(input_ids)
outputs = self.transformer(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device)
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
raise ValueError('All examples must have the same number of <eos> tokens.')
batch_size, _, hidden_size = sequence_output.shape
sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = 'regression'
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return Seq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
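# Minimal usage sketch (assumes the "google-t5/t5-small" checkpoint is reachable; the
# classification head is newly initialized): the head pools the decoder state at the final
# <eos> token, so every example in a batch must contain the same number of <eos> tokens,
# as enforced above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = T5ForSequenceClassification.from_pretrained("google-t5/t5-small", num_labels=3)
inputs = tokenizer("owning a dog is good for you", return_tensors="pt")
outputs = model(**inputs, labels=torch.tensor([1]))
print(outputs.logits.shape)  # (1, 3); a cross-entropy loss is computed since num_labels > 1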
|
@auto_docstring(custom_intro='\n T5 model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE\n tasks.\n ')
class T5ForSequenceClassification(T5PreTrainedModel):
def __init__(self, config: T5Config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Seq2SeqSequenceClassifierOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 59 | 5 | 49 | 5 | 9 | 0.09 | 1 | 10 | 4 | 0 | 2 | 3 | 2 | 5 | 124 | 12 | 103 | 35 | 81 | 9 | 48 | 17 | 45 | 17 | 2 | 3 | 18 |
5,553 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py | transformers.models.t5.modeling_t5.T5ForTokenClassification |
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
from .configuration_t5 import T5Config
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
@auto_docstring
class T5ForTokenClassification(T5PreTrainedModel):
_tied_weights_keys = ['transformer.encoder.embed_tokens.weight']
def __init__(self, config: T5Config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = T5EncoderModel(config)
self.dropout = nn.Dropout(config.classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:-1]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
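# Minimal usage sketch (assumes the "google-t5/t5-small" checkpoint is reachable; the
# classifier head is newly initialized): only the T5 encoder is used here, with a
# dropout + linear head applied to every token position.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = T5ForTokenClassification.from_pretrained("google-t5/t5-small", num_labels=5)
inputs = tokenizer("T5 tags every token", return_tensors="pt")
logits = model(**inputs).logits
print(logits.shape)  # (1, sequence_length, 5)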
|
@auto_docstring
class T5ForTokenClassification(T5PreTrainedModel):
def __init__(self, config: T5Config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 5 | 1 | 29 | 4 | 22 | 3 | 3 | 0.13 | 1 | 6 | 3 | 0 | 2 | 4 | 2 | 5 | 63 | 9 | 48 | 25 | 33 | 6 | 23 | 14 | 20 | 5 | 2 | 1 | 6 |
5,554 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py | transformers.models.t5.modeling_t5.T5LayerCrossAttention |
from typing import Optional, Union
from torch import nn
from ...utils.deprecation import deprecate_kwarg
class T5LayerCrossAttention(nn.Module):
def __init__(self, config, layer_idx: Optional[int]=None):
super().__init__()
self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:]
return outputs
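# Minimal sketch, assuming the companion T5Attention and T5LayerNorm modules from this
# file are in scope and using an ad-hoc small T5Config: the cross-attention sub-layer
# reuses T5Attention but never carries a learned relative position bias, unlike the first
# self-attention layer of a stack.
from transformers import T5Config

config = T5Config(d_model=32, d_kv=8, num_heads=4, d_ff=64)
layer = T5LayerCrossAttention(config, layer_idx=0)
print(layer.EncDecAttention.has_relative_attention_bias)            # False
print(hasattr(layer.EncDecAttention, "relative_attention_bias"))    # False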
|
class T5LayerCrossAttention(nn.Module):
def __init__(self, config, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None):
pass
| 4 | 0 | 17 | 0 | 17 | 1 | 1 | 0.03 | 1 | 4 | 2 | 0 | 2 | 3 | 2 | 12 | 36 | 1 | 35 | 22 | 20 | 1 | 12 | 10 | 9 | 1 | 1 | 0 | 2 |
5,555 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py | transformers.models.t5.modeling_t5.T5LayerFF |
from torch import nn
from .configuration_t5 import T5Config
class T5LayerFF(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = T5DenseGatedActDense(config)
else:
self.DenseReluDense = T5DenseActDense(config)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
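# Minimal sketch, assuming the companion dense modules (T5DenseGatedActDense /
# T5DenseActDense) and T5LayerNorm from this file are in scope: the config selects a
# gated MLP (T5 v1.1 style) or the plain ReLU MLP, and the sub-layer follows the same
# pre-norm residual pattern as the attention sub-layers.
import torch

config = T5Config(d_model=32, d_ff=64, feed_forward_proj="gated-gelu")
ff = T5LayerFF(config)
print(type(ff.DenseReluDense).__name__)  # T5DenseGatedActDense
hidden = torch.randn(1, 5, 32)
print(ff(hidden).shape)  # residual output keeps the input shape: torch.Size([1, 5, 32])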
|
class T5LayerFF(nn.Module):
def __init__(self, config: T5Config):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 7 | 1 | 7 | 0 | 2 | 0 | 1 | 5 | 4 | 0 | 2 | 3 | 2 | 12 | 16 | 2 | 14 | 7 | 11 | 0 | 13 | 7 | 10 | 2 | 1 | 1 | 3 |
5,556 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py | transformers.models.t5.modeling_t5.T5LayerNorm |
import torch
from torch import nn
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
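# Minimal sketch: this is an RMS-style norm, so for float32 inputs the manual computation
# below matches the module output (no mean subtraction, no bias).
hidden = torch.randn(2, 4, 8)
norm = T5LayerNorm(hidden_size=8, eps=1e-6)
variance = hidden.pow(2).mean(-1, keepdim=True)
manual = hidden * torch.rsqrt(variance + norm.variance_epsilon) * norm.weight
assert torch.allclose(norm(hidden), manual)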
|
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
'''
pass
def forward(self, hidden_states):
pass
| 3 | 1 | 11 | 2 | 5 | 4 | 2 | 0.73 | 1 | 1 | 0 | 0 | 2 | 2 | 2 | 12 | 23 | 4 | 11 | 6 | 8 | 8 | 11 | 6 | 8 | 2 | 1 | 1 | 3 |
5,557 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py | transformers.models.t5.modeling_t5.T5LayerSelfAttention |
from typing import Optional, Union
from torch import nn
from ...utils.deprecation import deprecate_kwarg
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
super().__init__()
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:]
return outputs
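# Minimal sketch, assuming the companion T5Attention and T5LayerNorm modules from this
# file are in scope and using an ad-hoc small T5Config: the sub-layer follows T5's
# pre-norm pattern (LayerNorm -> attention -> dropout -> residual add), and only layers
# created with `has_relative_attention_bias=True` own the relative bias embedding.
from transformers import T5Config

config = T5Config(d_model=32, d_kv=8, num_heads=4, d_ff=64)
layer = T5LayerSelfAttention(config, has_relative_attention_bias=True, layer_idx=0)
print(layer.SelfAttention.relative_attention_bias.weight.shape)  # (num_buckets, num_heads) = (32, 4)
print(layer.layer_norm.weight.shape, layer.dropout.p)            # torch.Size([32]), config.dropout_rate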
|
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None):
pass
| 4 | 0 | 16 | 0 | 16 | 1 | 1 | 0.03 | 1 | 4 | 2 | 0 | 2 | 3 | 2 | 12 | 34 | 1 | 33 | 19 | 20 | 1 | 12 | 9 | 9 | 1 | 1 | 0 | 2 |
5,558 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py | transformers.models.t5.modeling_t5.T5Model |
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
import warnings
from .configuration_t5 import T5Config
from torch import nn
import copy
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
@auto_docstring
class T5Model(T5PreTrainedModel):
_keys_to_ignore_on_load_unexpected = ['decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight']
_tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight']
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.post_init()
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
warnings.warn("`T5Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'encoder.block.0': 0, 'encoder.block.1': 1, ...}", FutureWarning)
self.device_map = get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map
assert_device_map(self.device_map, len(self.encoder.block))
self.encoder.parallelize(self.device_map)
self.decoder.parallelize(self.device_map)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning)
self.encoder.deparallelize()
self.decoder.deparallelize()
self.encoder = self.encoder.to('cpu')
self.decoder = self.decoder.to('cpu')
self.model_parallel = False
self.device_map = None
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def _tie_weights(self):
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Example:
```python
>>> from transformers import AutoTokenizer, T5Model
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = T5Model.from_pretrained("google-t5/t5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model.
>>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg.
>>> decoder_input_ids = model._shift_right(decoder_input_ids)
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
hidden_states = encoder_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
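# Minimal usage sketch (assumes the "google-t5/t5-small" checkpoint is reachable): the
# encoder stack returned by `get_encoder()` can be run on its own, which is what
# generation does once before the decoding loop.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = T5Model.from_pretrained("google-t5/t5-small")
encoder = model.get_encoder()
encoder_outputs = encoder(**tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt"))
print(encoder_outputs.last_hidden_state.shape)  # (1, sequence_length, d_model)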
|
@auto_docstring
class T5Model(T5PreTrainedModel):
def __init__(self, config: T5Config):
pass
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
pass
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _tie_weights(self):
pass
def get_encoder(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To learn more about how to prepare `decoder_input_ids` for pretraining, take a look at [T5
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
Example:
```python
>>> from transformers import AutoTokenizer, T5Model
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = T5Model.from_pretrained("google-t5/t5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model.
>>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg.
>>> decoder_input_ids = model._shift_right(decoder_input_ids)
>>> # forward pass
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
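The `decoder_head_mask` and `cross_attn_head_mask` arguments documented above take values in `[0, 1]`, where 1 keeps an attention head and 0 masks it for that forward pass. A minimal sketch, assuming the `google-t5/t5-small` checkpoint is available, of how such a mask could be built and passed in; masking the last head of every decoder layer is purely illustrative:
```python
import torch
from transformers import AutoTokenizer, T5Model

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
model = T5Model.from_pretrained("google-t5/t5-small")

input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids
decoder_input_ids = model._shift_right(input_ids)

# Shape (num_layers, num_heads); 1.0 keeps a head, 0.0 masks it.
decoder_head_mask = torch.ones(model.config.num_decoder_layers, model.config.num_heads)
decoder_head_mask[:, -1] = 0.0  # illustrative: drop the last head in every decoder layer

outputs = model(
    input_ids=input_ids,
    decoder_input_ids=decoder_input_ids,
    decoder_head_mask=decoder_head_mask,
)
print(outputs.last_hidden_state.shape)
```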
| 14
| 2
| 19
| 2
| 14
| 3
| 3
| 0.19
| 1
| 9
| 4
| 0
| 10
| 5
| 10
| 13
| 204
| 27
| 149
| 44
| 116
| 28
| 76
| 23
| 65
| 14
| 2
| 2
| 26
|
5,559
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5PreTrainedModel
|
from .configuration_t5 import T5Config
from ...modeling_utils import PreTrainedModel
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
import torch
@auto_docstring
class T5PreTrainedModel(PreTrainedModel):
config: T5Config
base_model_prefix = 'transformer'
is_parallelizable = True
supports_gradient_checkpointing = True
_can_compile_fullgraph = True
_no_split_modules = ['T5Block']
_keep_in_fp32_modules = ['wo']
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {'decoder_input_ids': input_ids, 'input_ids': input_ids, 'decoder_attention_mask': input_mask}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel, T5ForQuestionAnswering)):
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings):
module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
if hasattr(module, 'qa_outputs'):
module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
module.qa_outputs.bias.data.zero_()
elif isinstance(module, T5ForTokenClassification):
if hasattr(module, 'classifier'):
module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0)
module.classifier.bias.data.zero_()
elif isinstance(module, T5ClassificationHead):
module.dense.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
if hasattr(module.dense, 'bias') and module.dense.bias is not None:
module.dense.bias.data.zero_()
module.out_proj.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
if hasattr(module.out_proj, 'bias') and module.out_proj.bias is not None:
module.out_proj.bias.data.zero_()
elif isinstance(module, T5DenseActDense):
module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
if hasattr(module.wi, 'bias') and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5))
if hasattr(module.wo, 'bias') and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5DenseGatedActDense):
module.wi_0.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
if hasattr(module.wi_0, 'bias') and module.wi_0.bias is not None:
module.wi_0.bias.data.zero_()
module.wi_1.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
if hasattr(module.wi_1, 'bias') and module.wi_1.bias is not None:
module.wi_1.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5))
if hasattr(module.wo, 'bias') and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
d_model = self.config.d_model
key_value_proj_dim = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5))
module.k.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
module.o.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
if decoder_start_token_id is None:
raise ValueError('self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information.')
if is_torch_fx_proxy(input_ids):
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError('self.model.config.pad_token_id has to be defined.')
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
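A standalone sketch of the shift-right behaviour implemented above, with assumed config values (released T5 checkpoints use `pad_token_id == decoder_start_token_id == 0`); only `torch` is required and the label ids are illustrative:
```python
import torch

decoder_start_token_id = 0  # assumed value, matching released T5 checkpoints
pad_token_id = 0            # assumed value

labels = torch.tensor([[37, 423, 5, 1, -100, -100]])  # -100 marks ignored label positions

shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()           # shift everything one position to the right
shifted[..., 0] = decoder_start_token_id              # prepend the decoder start token
shifted.masked_fill_(shifted == -100, pad_token_id)   # replace ignored positions with padding
print(shifted)  # tensor([[  0,  37, 423,   5,   1,   0]])
```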
|
@auto_docstring
class T5PreTrainedModel(PreTrainedModel):
@property
def dummy_inputs(self):
pass
def _init_weights(self, module):
'''Initialize the weights'''
pass
def _shift_right(self, input_ids):
pass
| 6
| 1
| 32
| 1
| 27
| 4
| 8
| 0.18
| 1
| 11
| 10
| 7
| 3
| 0
| 3
| 3
| 115
| 8
| 92
| 25
| 87
| 17
| 74
| 24
| 70
| 19
| 1
| 2
| 24
|
5,560
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/modeling_t5.py
|
transformers.models.t5.modeling_t5.T5Stack
|
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
import warnings
from ...modeling_attn_mask_utils import AttentionMaskConverter
from torch import nn
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from ...utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList([T5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)])
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.post_init()
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
warnings.warn("`T5Stack.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'block.0': 0, 'block.1': 1, ...}", FutureWarning)
self.device_map = get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map
assert_device_map(self.device_map, len(self.block))
self.model_parallel = True
self.first_device = 'cpu' if 'cpu' in self.device_map else 'cuda:' + str(min(self.device_map.keys()))
self.last_device = 'cuda:' + str(max(self.device_map.keys()))
for k, v in self.device_map.items():
for layer in v:
cuda_device = 'cuda:' + str(k)
self.block[layer] = self.block[layer].to(cuda_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
self.final_layer_norm = self.final_layer_norm.to(self.last_device)
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning)
self.model_parallel = False
self.device_map = None
self.first_device = 'cpu'
self.last_device = 'cpu'
for i in range(len(self.block)):
self.block[i] = self.block[i].to('cpu')
self.embed_tokens = self.embed_tokens.to('cpu')
self.final_layer_norm = self.final_layer_norm.to('cpu')
torch.cuda.empty_cache()
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
if self.model_parallel:
torch.cuda.set_device(self.first_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = 'decoder_' if self.is_decoder else ''
raise ValueError(f'You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time')
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = 'decoder_' if self.is_decoder else ''
raise ValueError(f'You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds')
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if inputs_embeds is None:
if self.embed_tokens is None:
raise ValueError('You have to initialize the model with valid token embeddings')
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
if use_cache is True:
if not self.is_decoder:
raise ValueError(f'`use_cache` can only be set to `True` if {self} is used as a decoder')
if self.is_decoder:
if use_cache and past_key_values is None:
if self.config.is_encoder_decoder:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
else:
past_key_values = DynamicCache(config=self.config)
elif not self.is_decoder:
past_key_values = None
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)
if attention_mask is None and (not is_torchdynamo_compiling()):
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
if self.config.is_decoder:
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions)
elif attention_mask is not None:
causal_mask = attention_mask[:, None, None, :]
causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
else:
causal_mask = None
if self.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.is_decoder else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, layer_module in enumerate(self.block):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
if causal_mask is not None:
causal_mask = causal_mask.to(hidden_states.device)
if position_bias is not None:
position_bias = position_bias.to(hidden_states.device)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
if encoder_extended_attention_mask is not None:
encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
if encoder_decoder_position_bias is not None:
encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
if layer_head_mask is not None:
layer_head_mask = layer_head_mask.to(hidden_states.device)
if cross_attn_layer_head_mask is not None:
cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=return_dict, cache_position=cache_position)
hidden_states = layer_outputs[0]
position_bias = layer_outputs[1]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[2],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[4],)
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and 'cuda:' + str(k) != self.last_device:
hidden_states = hidden_states.to('cuda:' + str(k + 1))
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions)
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache,
to account for the 0 padding, i.e. the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
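To make `_prepare_4d_causal_attention_mask_with_cache_position` concrete, here is a minimal standalone sketch of its core path for a 2D padding mask and no cache (toy shapes and values chosen for illustration); it mirrors the steps above rather than calling the method itself:
```python
import torch

batch_size, sequence_length, target_length = 1, 4, 4
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

cache_position = torch.arange(sequence_length)  # positions 0..3, no past tokens
attention_mask = torch.tensor([[1, 1, 1, 0]])   # last position is padding

# Start from a fully masked square, then keep the mask only above the diagonal (causal part).
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()

# Fold the 2D padding mask into the 4D causal mask.
padding_mask = causal_mask[:, :, :, :sequence_length] + attention_mask[:, None, None, :].to(dtype)
causal_mask[:, :, :, :sequence_length] = causal_mask[:, :, :, :sequence_length].masked_fill(
    padding_mask == 0, min_dtype
)
print(causal_mask.shape)  # torch.Size([1, 1, 4, 4])
```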
|
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
pass
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
pass
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):
pass
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache,
to account for the 0 padding, i.e. the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
'''
pass
| 11
| 1
| 53
| 4
| 42
| 7
| 10
| 0.17
| 1
| 18
| 8
| 0
| 7
| 10
| 8
| 11
| 439
| 42
| 342
| 92
| 299
| 57
| 192
| 58
| 183
| 57
| 2
| 4
| 80
|
5,561
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/tokenization_t5.py
|
transformers.models.t5.tokenization_t5.T5Tokenizer
|
import sentencepiece as spm
import warnings
from ...tokenization_utils import PreTrainedTokenizer
import os
from typing import TYPE_CHECKING, Any, Optional
from ...convert_slow_tokenizer import import_protobuf
from shutil import copyfile
from ...tokenization_utils_base import AddedToken
import re
from ...utils.import_utils import requires
@requires(backends=('sentencepiece',))
class T5Tokenizer(PreTrainedTokenizer):
"""
Construct a T5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 100):
Number of extra ids added to the vocabulary for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. These tokens can be
retrieved by calling the get_sentinel_tokens method, and their token ids can be retrieved by calling the
get_sentinel_token_ids method.
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
legacy (`bool`, *optional*):
Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622
and #25224 which includes fixes to properly handle tokens that appear after special tokens. A simple
example:
- `legacy=True`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=True)
>>> tokenizer.encode("Hello <extra_id_0>.")
[8774, 32099, 3, 5, 1]
```
- `legacy=False`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False)
>>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here
[8774, 32099, 5, 1]
```
Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like
any other word.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=100, additional_special_tokens=None, sp_model_kwargs: Optional[dict[str, Any]]=None, legacy=None, add_prefix_space=True, **kwargs) -> None:
pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.vocab_file = vocab_file
self._extra_ids = extra_ids
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
if additional_special_tokens is not None:
extra_tokens = [x for x in additional_special_tokens if '<extra_id_' in str(x)]
if len(extra_tokens) < 1:
additional_special_tokens += [f'<extra_id_{i}>' for i in range(extra_ids)]
elif extra_ids > 0 and extra_ids != len(extra_tokens):
raise ValueError(f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids tokens')
else:
extra_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
additional_special_tokens = extra_tokens
self._added_tokens_decoder = {}
for i in range(len(extra_tokens)):
self._added_tokens_decoder[len(self.sp_model) - 1 + extra_ids - i] = AddedToken(f'<extra_id_{i}>', single_word=False, lstrip=True, rstrip=True, special=True, normalized=False)
if legacy is None:
logger.warning_once(f'You are using the default legacy behaviour of the {self.__class__}. This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565')
legacy = True
self.legacy = legacy
self.sp_model = self.get_spm_processor(kwargs.pop('from_slow', False))
self.add_prefix_space = add_prefix_space
super().__init__(eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, add_prefix_space=add_prefix_space, **kwargs)
def get_spm_processor(self, from_slow=False):
tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
if self.legacy or from_slow:
tokenizer.Load(self.vocab_file)
return tokenizer
with open(self.vocab_file, 'rb') as f:
sp_model = f.read()
model_pb2 = import_protobuf(f'The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)')
model = model_pb2.ModelProto.FromString(sp_model)
normalizer_spec = model_pb2.NormalizerSpec()
normalizer_spec.add_dummy_prefix = False
model.normalizer_spec.MergeFrom(normalizer_spec)
sp_model = model.SerializeToString()
tokenizer.LoadFromSerializedProto(sp_model)
return tokenizer
@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(f'This tokenizer was incorrectly instantiated with a model max length of {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n- Be aware that you SHOULD NOT rely on {pretrained_model_name_or_path} automatically truncating your input to {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences longer than {deprecated_max_model_length} you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.', FutureWarning)
return max_model_length
@property
def vocab_size(self):
return self.sp_model.get_piece_size()
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [0] * len(token_ids_0) + [1]
return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
def get_sentinel_tokens(self):
return list(set(filter(lambda x: bool(re.search('<extra_id_\\d+>', x)) is not None, self.additional_special_tokens)))
def get_sentinel_token_ids(self):
return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]:
"""Do not add eos again if user already added it."""
if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added.')
return token_ids
else:
return token_ids + [self.eos_token_id]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
token_ids_0 = self._add_eos_if_not_present(token_ids_0)
if token_ids_1 is None:
return token_ids_0
else:
token_ids_1 = self._add_eos_if_not_present(token_ids_1)
return token_ids_0 + token_ids_1
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def tokenize(self, text: 'TextInput', **kwargs) -> list[str]:
"""
Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
first token is special.
"""
if self.legacy or len(text) == 0:
return super().tokenize(text, **kwargs)
text = text.replace(SPIECE_UNDERLINE, ' ')
if self.add_prefix_space:
text = SPIECE_UNDERLINE + text
tokens = super().tokenize(text, **kwargs)
if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and (tokens[1] in self.all_special_tokens):
tokens = tokens[1:]
return tokens
@property
def unk_token_length(self):
return len(self.sp_model.encode(str(self.unk_token)))
def _tokenize(self, text, **kwargs):
"""
Returns a tokenized string.
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
`['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
`unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
"""
if self.legacy or not text.startswith((SPIECE_UNDERLINE, ' ')):
return self.sp_model.encode(text, out_type=str)
tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
return tokens[self.unk_token_length:] if len(tokens) >= self.unk_token_length else tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
token = self.sp_model.IdToPiece(index)
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
tokens[0] = tokens[0][1:]
current_sub_tokens = []
out_string = ''
prev_is_special = False
for token in tokens:
if token in self.all_special_tokens:
if not prev_is_special:
out_string += ' '
out_string += self.sp_model.decode(current_sub_tokens) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token)
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
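A brief usage sketch for the sentinel-token helpers and the `legacy=False` behaviour documented above; it assumes the `google-t5/t5-base` checkpoint and the `sentencepiece` backend are available:
```python
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False)

sentinels = tokenizer.get_sentinel_tokens()
print(len(sentinels))                    # 100 with the default extra_ids
print(sorted(sentinels)[:2])             # e.g. ['<extra_id_0>', '<extra_id_1>']
print(tokenizer.get_sentinel_token_ids()[:2])

# Per the class docstring, legacy=False drops the spurious extra space token after special tokens.
print(tokenizer.encode("Hello <extra_id_0>."))  # [8774, 32099, 5, 1]
```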
|
@requires(backends=('sentencepiece',))
class T5Tokenizer(PreTrainedTokenizer):
'''
Construct a T5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 100):
Number of extra ids added to the vocabulary for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. These tokens can be
retrieved by calling the get_sentinel_tokens method, and their token ids can be retrieved by calling the
get_sentinel_token_ids method.
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
legacy (`bool`, *optional*):
Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622
and #25224 which includes fixes to properly handle tokens that appear after special tokens. A simple
example:
- `legacy=True`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=True)
>>> tokenizer.encode("Hello <extra_id_0>.")
[8774, 32099, 3, 5, 1]
```
- `legacy=False`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False)
>>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here
[8774, 32099, 5, 1]
```
Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like
any other word.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
'''
def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=100, additional_special_tokens=None, sp_model_kwargs: Optional[dict[str, Any]]=None, legacy=None, add_prefix_space=True, **kwargs) -> None:
pass
def get_spm_processor(self, from_slow=False):
pass
@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def get_sentinel_tokens(self):
pass
def get_sentinel_token_ids(self):
pass
def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]:
'''Do not add eos again if user already added it.'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
def tokenize(self, text: 'TextInput', **kwargs) -> list[str]:
'''
Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
first token is special.
'''
pass
@property
def unk_token_length(self):
pass
def _tokenize(self, text, **kwargs):
'''
Returns a tokenized string.
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
`['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
`unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) into a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 25
| 10
| 15
| 1
| 10
| 3
| 3
| 0.59
| 1
| 11
| 0
| 1
| 19
| 8
| 20
| 109
| 403
| 63
| 215
| 73
| 173
| 126
| 145
| 50
| 124
| 10
| 3
| 3
| 52
|
5,562
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/t5/tokenization_t5_fast.py
|
transformers.models.t5.tokenization_t5_fast.T5TokenizerFast
|
from ...tokenization_utils_fast import PreTrainedTokenizerFast
import re
from shutil import copyfile
from typing import Optional
import os
import warnings
class T5TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 100):
Number of extra ids added to the vocabulary for use as sentinels. These tokens are accessible as
"<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. These tokens can be retrieved by
calling the get_sentinel_tokens method, and their token ids can be retrieved by calling the get_sentinel_token_ids method.
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
add_prefix_space (`bool`, *optional*):
Whether or not the tokenizer should automatically add a prefix space.
from_slow (`bool`, *optional*, defaults to `False`):
Whether or not the tokenizer should be converted from a slow one. If `add_prefix_space` is set, this will be set to `True`.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = T5Tokenizer
prefix_tokens: list[int] = []
def __init__(self, vocab_file=None, tokenizer_file=None, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=100, additional_special_tokens=None, add_prefix_space=None, **kwargs):
if additional_special_tokens is not None:
extra_tokens = [x for x in additional_special_tokens if '<extra_id_' in str(x)]
if len(extra_tokens) < 1:
additional_special_tokens += [f'<extra_id_{i}>' for i in range(extra_ids)]
elif extra_ids > 0 and extra_ids != len(extra_tokens):
raise ValueError(f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids tokens')
else:
extra_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
additional_special_tokens = extra_tokens
if add_prefix_space is not None:
logger.warning_once('You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers')
kwargs['from_slow'] = True
super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, add_prefix_space=add_prefix_space, **kwargs)
self.vocab_file = vocab_file
self._extra_ids = extra_ids
@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(f'This tokenizer was incorrectly instantiated with a model max length of {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n- Be aware that you SHOULD NOT rely on {pretrained_model_name_or_path} automatically truncating your input to {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences longer than {deprecated_max_model_length} you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.', FutureWarning)
return max_model_length
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
logger.info(f'Copy vocab file to {out_vocab_file}')
return (out_vocab_file,)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
token_ids_0 = token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0
else:
token_ids_1 = token_ids_1 + [self.eos_token_id]
return self.prefix_tokens + token_ids_0 + token_ids_1
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
def get_sentinel_tokens(self):
return list(set(filter(lambda x: bool(re.search('<extra_id_\\d+>', x)) is not None, self.additional_special_tokens)))
def get_sentinel_token_ids(self):
return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
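A pure-Python sketch of the pair-building logic in `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` above, using an assumed `eos_token_id` of 1 (the `</s>` id in released T5 checkpoints) and the default empty `prefix_tokens`; the token ids themselves are illustrative:
```python
eos_token_id = 1                 # assumed </s> id
prefix_tokens: list = []         # T5TokenizerFast.prefix_tokens defaults to []

token_ids_0 = [8774, 6]          # hypothetical ids for a first sequence
token_ids_1 = [571, 33, 25, 58]  # hypothetical ids for a second sequence

# single sequence: X </s>; pair of sequences: A </s> B </s>
pair_inputs = prefix_tokens + token_ids_0 + [eos_token_id] + token_ids_1 + [eos_token_id]
token_type_ids = len(token_ids_0 + [eos_token_id] + token_ids_1 + [eos_token_id]) * [0]

print(pair_inputs)     # [8774, 6, 1, 571, 33, 25, 58, 1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 0, 0, 0]; T5 does not use token type ids
```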
|
class T5TokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 100):
Number of extra ids added to the vocabulary for use as sentinels. These tokens are accessible as
"<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. These tokens can be retrieved by
calling the get_sentinel_tokens method, and their token ids can be retrieved by calling the get_sentinel_token_ids method.
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
add_prefix_space (`bool`, *optional*):
Whether or not the tokenizer should automatically add a prefix space.
from_slow (`bool`, *optional*, defaults to `False`):
Whether or not the tokenizer should be converted from a slow one. If `add_prefix_space` is set, this will be set to `True`.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=100, additional_special_tokens=None, add_prefix_space=None, **kwargs):
pass
@staticmethod
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def get_sentinel_tokens(self):
pass
def get_sentinel_token_ids(self):
pass
| 9
| 3
| 18
| 2
| 13
| 3
| 3
| 0.51
| 1
| 10
| 0
| 1
| 7
| 2
| 8
| 96
| 194
| 29
| 109
| 36
| 83
| 56
| 54
| 19
| 45
| 5
| 3
| 2
| 22
|
5,563
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/configuration_table_transformer.py
|
transformers.models.table_transformer.configuration_table_transformer.TableTransformerConfig
|
from ..auto import CONFIG_MAPPING
from ...utils.backbone_utils import verify_backbone_config_arguments
from ...configuration_utils import PretrainedConfig
class TableTransformerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`TableTransformerModel`]. It is used to
instantiate a Table Transformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Table Transformer
[microsoft/table-transformer-detection](https://huggingface.co/microsoft/table-transformer-detection) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
API.
backbone_config (`PretrainedConfig` or `dict`, *optional*):
The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
case it will default to `ResNetConfig()`.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_queries (`int`, *optional*, defaults to 100):
Number of object queries, i.e. detection slots. This is the maximal number of objects
[`TableTransformerModel`] can detect in a single image. For COCO, we recommend 100 queries.
d_model (`int`, *optional*, defaults to 256):
Dimension of the layers.
encoder_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
auxiliary_loss (`bool`, *optional*, defaults to `False`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
position_embedding_type (`str`, *optional*, defaults to `"sine"`):
Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use pretrained weights for the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
dilation (`bool`, *optional*, defaults to `False`):
Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
`use_timm_backbone` = `True`.
class_cost (`float`, *optional*, defaults to 1):
Relative weight of the classification error in the Hungarian matching cost.
bbox_cost (`float`, *optional*, defaults to 5):
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
giou_cost (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
mask_loss_coefficient (`float`, *optional*, defaults to 1):
Relative weight of the Focal loss in the panoptic segmentation loss.
dice_loss_coefficient (`float`, *optional*, defaults to 1):
Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
bbox_loss_coefficient (`float`, *optional*, defaults to 5):
Relative weight of the L1 bounding box loss in the object detection loss.
giou_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss in the object detection loss.
eos_coefficient (`float`, *optional*, defaults to 0.1):
Relative classification weight of the 'no-object' class in the object detection loss.
Examples:
```python
>>> from transformers import TableTransformerModel, TableTransformerConfig
>>> # Initializing a Table Transformer microsoft/table-transformer-detection style configuration
>>> configuration = TableTransformerConfig()
>>> # Initializing a model from the microsoft/table-transformer-detection style configuration
>>> model = TableTransformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'table-transformer'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}
def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
if use_timm_backbone and backbone_kwargs is None:
backbone_kwargs = {}
if dilation:
backbone_kwargs['output_stride'] = 16
backbone_kwargs['out_indices'] = [1, 2, 3, 4]
backbone_kwargs['in_chans'] = num_channels
elif not use_timm_backbone and backbone in (None, 'resnet50'):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get('model_type')
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
backbone = None
dilation = None
verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_channels = num_channels
self.num_queries = num_queries
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.num_hidden_layers = encoder_layers
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.backbone_kwargs = backbone_kwargs
self.dilation = dilation
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model
@property
def sub_configs(self):
return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
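# Hedged usage sketch (not part of the original file): the attribute_map and properties
# above alias `hidden_size` to `d_model` and `num_attention_heads` to
# `encoder_attention_heads`; the default values follow the docstring.
def _example_table_transformer_config_aliases():
    config = TableTransformerConfig()
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.encoder_attention_heads == 8
    return config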
|
class TableTransformerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`TableTransformerModel`]. It is used to
instantiate a Table Transformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Table Transformer
[microsoft/table-transformer-detection](https://huggingface.co/microsoft/table-transformer-detection) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
API.
backbone_config (`PretrainedConfig` or `dict`, *optional*):
The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
case it will default to `ResNetConfig()`.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_queries (`int`, *optional*, defaults to 100):
Number of object queries, i.e. detection slots. This is the maximal number of objects
[`TableTransformerModel`] can detect in a single image. For COCO, we recommend 100 queries.
d_model (`int`, *optional*, defaults to 256):
Dimension of the layers.
encoder_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
auxiliary_loss (`bool`, *optional*, defaults to `False`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
position_embedding_type (`str`, *optional*, defaults to `"sine"`):
Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use pretrained weights for the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
dilation (`bool`, *optional*, defaults to `False`):
Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
`use_timm_backbone` = `True`.
class_cost (`float`, *optional*, defaults to 1):
Relative weight of the classification error in the Hungarian matching cost.
bbox_cost (`float`, *optional*, defaults to 5):
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
giou_cost (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
mask_loss_coefficient (`float`, *optional*, defaults to 1):
Relative weight of the Focal loss in the panoptic segmentation loss.
dice_loss_coefficient (`float`, *optional*, defaults to 1):
Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
bbox_loss_coefficient (`float`, *optional*, defaults to 5):
Relative weight of the L1 bounding box loss in the object detection loss.
giou_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss in the object detection loss.
eos_coefficient (`float`, *optional*, defaults to 0.1):
Relative classification weight of the 'no-object' class in the object detection loss.
Examples:
```python
>>> from transformers import TableTransformerModel, TableTransformerConfig
>>> # Initializing a Table Transformer microsoft/table-transformer-detection style configuration
>>> configuration = TableTransformerConfig()
>>> # Initializing a model from the microsoft/table-transformer-detection style configuration
>>> model = TableTransformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
pass
@property
def num_attention_heads(self) -> int:
pass
@property
def hidden_size(self) -> int:
pass
@property
def sub_configs(self):
pass
| 8
| 1
| 36
| 1
| 33
| 2
| 3
| 0.93
| 1
| 3
| 0
| 0
| 3
| 34
| 3
| 3
| 223
| 13
| 109
| 82
| 66
| 101
| 59
| 43
| 55
| 6
| 1
| 2
| 8
|
5,564
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/configuration_table_transformer.py
|
transformers.models.table_transformer.configuration_table_transformer.TableTransformerOnnxConfig
|
from packaging import version
from ...onnx import OnnxConfig
from collections import OrderedDict
from collections.abc import Mapping
class TableTransformerOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'})])
@property
def atol_for_validation(self) -> float:
return 1e-05
@property
def default_onnx_opset(self) -> int:
return 12
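# Hedged usage sketch (illustration only, not part of the original file): reading the
# export settings this OnnxConfig exposes; `TableTransformerConfig` is defined above in
# the same module, so no extra import is assumed.
def _example_onnx_export_settings():
    onnx_config = TableTransformerOnnxConfig(TableTransformerConfig())
    dynamic_axes = dict(onnx_config.inputs)   # pixel_values / pixel_mask dynamic axes
    opset = onnx_config.default_onnx_opset    # 12
    return dynamic_axes, opset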
|
class TableTransformerOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
@property
def default_onnx_opset(self) -> int:
pass
| 7
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 3
| 0
| 3
| 3
| 19
| 3
| 16
| 8
| 9
| 0
| 8
| 5
| 4
| 1
| 1
| 0
| 3
|
5,565
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerAttention
|
from torch import Tensor, nn
import torch
from typing import Optional, Union
class TableTransformerAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Here, we add position embeddings to the queries and keys (as explained in the TABLE_TRANSFORMER paper).
"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]):
return tensor if object_queries is None else tensor + object_queries
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, spatial_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
if object_queries is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, object_queries)
if spatial_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings)
query_states = self.q_proj(hidden_states) * self.scaling
if is_cross_attention:
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (batch_size, 1, target_len, source_len):
raise ValueError(f'Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is {attention_mask.size()}')
if attention_mask.dtype == torch.bool:
attention_mask = torch.zeros_like(attention_mask, dtype=attn_weights.dtype).masked_fill_(attention_mask, -torch.inf)
attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped)
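# Hedged usage sketch (illustration only, not part of the original class): running the
# attention block on random tensors to show the expected shapes. All sizes are assumptions.
def _example_attention_shapes():
    attention = TableTransformerAttention(embed_dim=256, num_heads=8)
    hidden_states = torch.randn(2, 100, 256)    # (batch, num_queries, embed_dim)
    object_queries = torch.randn(2, 100, 256)   # added to queries and keys, not to values
    attn_output, attn_weights = attention(
        hidden_states, object_queries=object_queries, output_attentions=True
    )
    assert attn_output.shape == (2, 100, 256)
    assert attn_weights.shape == (2, 8, 100, 100)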
|
class TableTransformerAttention(nn.Module):
'''
Multi-headed attention from 'Attention Is All You Need' paper.
Here, we add position embeddings to the queries and keys (as explained in the TABLE_TRANSFORMER paper).
'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
pass
def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, spatial_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 5
| 2
| 29
| 4
| 22
| 3
| 4
| 0.19
| 1
| 6
| 0
| 0
| 4
| 9
| 4
| 14
| 127
| 22
| 88
| 41
| 69
| 17
| 60
| 27
| 55
| 9
| 1
| 2
| 14
|
5,566
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerConvEncoder
|
from torch import Tensor, nn
import torch
from ...utils.backbone_utils import load_backbone
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
class TableTransformerConvEncoder(nn.Module):
"""
Convolutional backbone, using either the AutoBackbone API or one from the timm library.
nn.BatchNorm2d layers are replaced by TableTransformerFrozenBatchNorm2d as defined above.
"""
def __init__(self, config):
super().__init__()
self.config = config
if config.use_timm_backbone:
requires_backends(self, ['timm'])
kwargs = getattr(config, 'backbone_kwargs', {})
kwargs = {} if kwargs is None else kwargs.copy()
out_indices = kwargs.pop('out_indices', (1, 2, 3, 4))
num_channels = kwargs.pop('in_chans', config.num_channels)
if config.dilation:
kwargs['output_stride'] = kwargs.get('output_stride', 16)
backbone = create_model(config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=out_indices, in_chans=num_channels, **kwargs)
else:
backbone = load_backbone(config)
with torch.no_grad():
replace_batch_norm(backbone)
self.model = backbone
self.intermediate_channel_sizes = self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
backbone_model_type = None
if config.backbone is not None:
backbone_model_type = config.backbone
elif config.backbone_config is not None:
backbone_model_type = config.backbone_config.model_type
else:
raise ValueError('Either `backbone` or `backbone_config` should be provided in the config')
if 'resnet' in backbone_model_type:
for name, parameter in self.model.named_parameters():
if config.use_timm_backbone:
if 'layer2' not in name and 'layer3' not in name and ('layer4' not in name):
parameter.requires_grad_(False)
elif 'stage.1' not in name and 'stage.2' not in name and ('stage.3' not in name):
parameter.requires_grad_(False)
def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
out = []
for feature_map in features:
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
out.append((feature_map, mask))
return out
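# Hedged sketch (not part of the original class) of the mask-resizing step in forward():
# the pixel mask is interpolated down to each feature map's spatial size and cast back
# to bool, so padding pixels stay masked at every scale. Shapes are illustration-only.
def _example_mask_downsampling():
    pixel_mask = torch.ones(2, 800, 1066, dtype=torch.bool)   # (batch, height, width)
    feature_map = torch.randn(2, 2048, 25, 34)                # (batch, channels, h, w)
    mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
    assert mask.shape == (2, 25, 34)
    return mask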
|
class TableTransformerConvEncoder(nn.Module):
'''
Convolutional backbone, using either the AutoBackbone API or one from the timm library.
nn.BatchNorm2d layers are replaced by TableTransformerFrozenBatchNorm2d as defined above.
'''
def __init__(self, config):
pass
def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
pass
| 3
| 1
| 31
| 3
| 25
| 3
| 8
| 0.2
| 1
| 4
| 0
| 0
| 2
| 3
| 2
| 12
| 70
| 10
| 50
| 16
| 47
| 10
| 37
| 16
| 34
| 12
| 1
| 4
| 15
|
5,567
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerConvModel
|
from torch import Tensor, nn
class TableTransformerConvModel(nn.Module):
"""
This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
"""
def __init__(self, conv_encoder, position_embedding):
super().__init__()
self.conv_encoder = conv_encoder
self.position_embedding = position_embedding
def forward(self, pixel_values, pixel_mask):
out = self.conv_encoder(pixel_values, pixel_mask)
pos = []
for feature_map, mask in out:
pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
return (out, pos)
|
class TableTransformerConvModel(nn.Module):
'''
This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
'''
def __init__(self, conv_encoder, position_embedding):
pass
def forward(self, pixel_values, pixel_mask):
pass
| 3
| 1
| 7
| 1
| 5
| 1
| 2
| 0.45
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 19
| 3
| 11
| 8
| 8
| 5
| 11
| 8
| 8
| 2
| 1
| 1
| 3
|
5,568
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerDecoder
|
from .configuration_table_transformer import TableTransformerConfig
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
import torch
from torch import Tensor, nn
class TableTransformerDecoder(TableTransformerPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TableTransformerDecoderLayer`].
The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
Some small tweaks for TABLE_TRANSFORMER:
- object_queries and query_position_embeddings are added to the forward pass.
- if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.
Args:
config: TableTransformerConfig
"""
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.layers = nn.ModuleList([TableTransformerDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
The query embeddings that are passed into the decoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`:
- 1 for queries that are **not masked**,
- 0 for queries that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Object queries that are added to the queries and keys in each cross-attention layer.
query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if inputs_embeds is not None:
hidden_states = inputs_embeds
input_shape = inputs_embeds.size()[:-1]
combined_attention_mask = None
if attention_mask is not None and combined_attention_mask is not None:
combined_attention_mask = combined_attention_mask + _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
if encoder_hidden_states is not None and encoder_attention_mask is not None:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
intermediate = () if self.config.auxiliary_loss else None
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, combined_attention_mask, object_queries, query_position_embeddings, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if self.config.auxiliary_loss:
hidden_states = self.layernorm(hidden_states)
intermediate += (hidden_states,)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layernorm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.config.auxiliary_loss:
intermediate = torch.stack(intermediate)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate] if v is not None))
return TableTransformerDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate)
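# Hedged sketch (illustration only, not part of the original file): with
# `auxiliary_loss=True` the decoder stacks the layer-normed output of every decoder layer
# into `intermediate_hidden_states`. The deliberately tiny sizes below are assumptions.
def _example_decoder_auxiliary_outputs():
    config = TableTransformerConfig(
        auxiliary_loss=True, decoder_layers=2, d_model=32,
        decoder_attention_heads=4, decoder_ffn_dim=64,
    )
    decoder = TableTransformerDecoder(config)
    queries = torch.zeros(1, 15, 32)    # (batch, num_queries, d_model)
    outputs = decoder(inputs_embeds=queries, query_position_embeddings=torch.zeros(1, 15, 32))
    # (decoder_layers, batch_size, num_queries, d_model)
    assert outputs.intermediate_hidden_states.shape == (2, 1, 15, 32)
    return outputs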
|
class TableTransformerDecoder(TableTransformerPreTrainedModel):
'''
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TableTransformerDecoderLayer`].
The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
Some small tweaks for TABLE_TRANSFORMER:
- object_queries and query_position_embeddings are added to the forward pass.
- if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.
Args:
config: TableTransformerConfig
'''
def __init__(self, config: TableTransformerConfig):
pass
def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
The query embeddings that are passed into the decoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`:
- 1 for queries that are **not masked**,
- 0 for queries that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Object queries that are added to the queries and keys in each cross-attention layer.
query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3
| 2
| 79
| 12
| 47
| 21
| 12
| 0.53
| 1
| 7
| 3
| 0
| 2
| 5
| 2
| 3
| 174
| 30
| 94
| 29
| 80
| 50
| 51
| 18
| 48
| 22
| 2
| 3
| 23
|
5,569
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerDecoderLayer
|
from ...modeling_layers import GradientCheckpointingLayer
from ...activations import ACT2FN
from typing import Optional, Union
from .configuration_table_transformer import TableTransformerConfig
import torch
from torch import Tensor, nn
class TableTransformerDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: TableTransformerConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = TableTransformerAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = TableTransformerAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*):
object queries that are added to the queries and keys
in the cross-attention layer.
query_position_embeddings (`torch.FloatTensor`, *optional*):
object queries that are added to the queries and keys
in the self-attention layer.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, object_queries=query_position_embeddings, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
cross_attn_weights = None
if encoder_hidden_states is not None:
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, object_queries=query_position_embeddings, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, spatial_position_embeddings=object_queries, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
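# Hedged usage sketch (illustration only, not part of the original class): one pre-norm
# decoder layer combining self-attention over the object queries, cross-attention over
# encoder features, and the feed-forward block. The tiny sizes below are assumptions.
def _example_decoder_layer_shapes():
    config = TableTransformerConfig(d_model=32, decoder_attention_heads=4, decoder_ffn_dim=64)
    layer = TableTransformerDecoderLayer(config)
    queries = torch.randn(1, 15, 32)           # (batch, num_queries, d_model)
    query_pos = torch.randn(1, 15, 32)         # added in the self-attention layer
    encoder_features = torch.randn(1, 50, 32)  # (batch, source_len, d_model)
    encoder_pos = torch.randn(1, 50, 32)       # added to encoder keys in cross-attention
    (hidden_states,) = layer(
        queries,
        object_queries=encoder_pos,
        query_position_embeddings=query_pos,
        encoder_hidden_states=encoder_features,
    )
    assert hidden_states.shape == (1, 15, 32)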
|
class TableTransformerDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: TableTransformerConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*):
object queries that are added to the queries and keys
in the cross-attention layer.
query_position_embeddings (`torch.FloatTensor`, *optional*):
object queries that are added to the queries and keys
in the self-attention layer.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 51
| 6
| 33
| 12
| 2
| 0.37
| 1
| 5
| 2
| 0
| 2
| 11
| 2
| 12
| 105
| 13
| 67
| 27
| 55
| 25
| 38
| 18
| 35
| 3
| 1
| 1
| 4
|
5,570
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerDecoderOutput
|
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions,\n namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them\n gone through a layernorm. This is useful when training the model with auxiliary decoding losses.\n ')
class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions):
"""
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
"""
intermediate_hidden_states: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions,\n namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them\n gone through a layernorm. This is useful when training the model with auxiliary decoding losses.\n ')
class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions):
'''
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 11.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 2
| 2
| 2
| 1
| 23
| 2
| 2
| 1
| 0
| 2
| 0
| 0
|
5,571
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerEncoder
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from .configuration_table_transformer import TableTransformerConfig
from torch import Tensor, nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput
import torch
class TableTransformerEncoder(TableTransformerPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TableTransformerEncoderLayer`].
The encoder updates the flattened feature map through multiple self-attention layers.
Small tweak for Table Transformer:
- object_queries are added to the forward pass.
Args:
config: TableTransformerConfig
"""
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.layers = nn.ModuleList([TableTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm = nn.LayerNorm(config.d_model)
self.post_init()
def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
hidden_states = self.layernorm(hidden_states)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
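# Hedged sketch (illustration only, not part of the original class): the "flattened
# feature map" this encoder consumes is the projected backbone output reshaped from
# (batch, d_model, height, width) to (batch, height*width, d_model), with one sine/learned
# position embedding per spatial location passed as `object_queries`. Sizes are assumptions.
def _example_flatten_feature_map():
    projected = torch.randn(2, 256, 25, 34)                  # (batch, d_model, h, w)
    inputs_embeds = projected.flatten(2).permute(0, 2, 1)    # (batch, h*w, d_model)
    assert inputs_embeds.shape == (2, 25 * 34, 256)
    return inputs_embeds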
|
class TableTransformerEncoder(TableTransformerPreTrainedModel):
'''
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TableTransformerEncoderLayer`].
The encoder updates the flattened feature map through multiple self-attention layers.
Small tweak for Table Transformer:
- object_queries are added to the forward pass.
Args:
config: TableTransformerConfig
'''
def __init__(self, config: TableTransformerConfig):
pass
def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3
| 2
| 49
| 9
| 28
| 13
| 8
| 0.63
| 1
| 6
| 3
| 0
| 2
| 4
| 2
| 3
| 114
| 24
| 56
| 22
| 45
| 35
| 38
| 14
| 35
| 15
| 2
| 3
| 16
|
5,572
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerEncoderLayer
|
from .configuration_table_transformer import TableTransformerConfig
from torch import Tensor, nn
import torch
from ...activations import ACT2FN
from typing import Optional, Union
class TableTransformerEncoderLayer(nn.Module):
def __init__(self, config: TableTransformerConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = TableTransformerAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*): object queries, to be added to hidden_states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
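# Hedged sketch (illustration only, not part of the original class) of the numerical-
# stability guard in forward(): activations that overflow under fp16 are clamped back
# into the representable range instead of propagating inf/nan.
def _example_fp16_overflow_guard():
    hidden_states = torch.tensor([70000.0, -70000.0]).to(torch.float16)  # overflows to +/-inf
    clamp_value = torch.finfo(hidden_states.dtype).max - 1000
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    assert torch.isfinite(hidden_states).all()
    return hidden_states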
|
class TableTransformerEncoderLayer(nn.Module):
def __init__(self, config: TableTransformerConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*): object queries, to be added to hidden_states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 34
| 5
| 24
| 6
| 3
| 0.25
| 1
| 5
| 2
| 0
| 2
| 9
| 2
| 12
| 71
| 11
| 48
| 22
| 39
| 12
| 33
| 16
| 30
| 4
| 1
| 2
| 5
|
5,573
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerForObjectDetection
|
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
from torch import Tensor, nn
from .configuration_table_transformer import TableTransformerConfig
@auto_docstring(custom_intro='\n Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on\n top, for tasks such as COCO detection.\n ')
class TableTransformerForObjectDetection(TableTransformerPreTrainedModel):
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
self.model = TableTransformerModel(config)
self.class_labels_classifier = nn.Linear(config.d_model, config.num_labels + 1)
self.bbox_predictor = TableTransformerMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3)
self.post_init()
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[list[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], TableTransformerObjectDetectionOutput]:
"""
decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
labels (`list[Dict]` of len `(batch_size,)`, *optional*):
Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> from transformers import AutoImageProcessor, TableTransformerForObjectDetection
>>> import torch
>>> from PIL import Image
>>> file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png")
>>> image = Image.open(file_path).convert("RGB")
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
>>> model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
>>> target_sizes = torch.tensor([image.size[::-1]])
>>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[
... 0
... ]
>>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
... box = [round(i, 2) for i in box.tolist()]
... print(
... f"Detected {model.config.id2label[label.item()]} with confidence "
... f"{round(score.item(), 3)} at location {box}"
... )
Detected table with confidence 1.0 at location [202.1, 210.59, 1119.22, 385.09]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.class_labels_classifier(sequence_output)
pred_boxes = self.bbox_predictor(sequence_output).sigmoid()
loss, loss_dict, auxiliary_outputs = (None, None, None)
if labels is not None:
outputs_class, outputs_coord = (None, None)
if self.config.auxiliary_loss:
intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4]
outputs_class = self.class_labels_classifier(intermediate)
outputs_coord = self.bbox_predictor(intermediate).sigmoid()
loss, loss_dict, auxiliary_outputs = self.loss_function(logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord)
if not return_dict:
if auxiliary_outputs is not None:
output = (logits, pred_boxes) + auxiliary_outputs + outputs
else:
output = (logits, pred_boxes) + outputs
return (loss, loss_dict) + output if loss is not None else output
return TableTransformerObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
| null | 5
| 1
| 61
| 9
| 34
| 19
| 5
| 0.55
| 1
| 6
| 4
| 0
| 2
| 3
| 2
| 3
| 127
| 18
| 71
| 27
| 54
| 39
| 27
| 14
| 24
| 8
| 2
| 2
| 9
|
5,574
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerFrozenBatchNorm2d
|
import torch
from torch import Tensor, nn
class TableTransformerFrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which any models other than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer('weight', torch.ones(n))
self.register_buffer('bias', torch.zeros(n))
self.register_buffer('running_mean', torch.zeros(n))
self.register_buffer('running_var', torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-05
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
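A hedged usage sketch for the frozen batch norm above: it reduces to a fixed per-channel affine transform. The import path follows this record's file_path and is an assumption about what the installed transformers build exposes.
```python
# Sketch: the frozen batch norm is a fixed per-channel affine transform.
# Importing from the module path listed above is an assumption, not a public API guarantee.
import torch
from transformers.models.table_transformer.modeling_table_transformer import (
    TableTransformerFrozenBatchNorm2d,
)

frozen_bn = TableTransformerFrozenBatchNorm2d(3)  # 3 channels; statistics live in buffers
x = torch.randn(2, 3, 8, 8)

out = frozen_bn(x)

# Recompute by hand: scale = weight / sqrt(running_var + eps), shift = bias - running_mean * scale
eps = 1e-5
scale = frozen_bn.weight / torch.sqrt(frozen_bn.running_var + eps)
shift = frozen_bn.bias - frozen_bn.running_mean * scale
expected = x * scale.view(1, -1, 1, 1) + shift.view(1, -1, 1, 1)

assert torch.allclose(out, expected, atol=1e-6)
```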
|
class TableTransformerFrozenBatchNorm2d(nn.Module):
'''
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which any models other than
torchvision.models.resnet[18,34,50,101] produce nans.
'''
def __init__(self, n):
pass
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
pass
def forward(self, x):
pass
| 4
| 1
| 9
| 0
| 8
| 1
| 1
| 0.28
| 1
| 1
| 0
| 0
| 3
| 0
| 3
| 13
| 37
| 5
| 25
| 13
| 19
| 7
| 21
| 11
| 17
| 2
| 1
| 1
| 4
|
5,575
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerLearnedPositionEmbedding
|
import torch
from torch import Tensor, nn
class TableTransformerLearnedPositionEmbedding(nn.Module):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, embedding_dim=256):
super().__init__()
self.row_embeddings = nn.Embedding(50, embedding_dim)
self.column_embeddings = nn.Embedding(50, embedding_dim)
def forward(self, pixel_values, pixel_mask=None):
height, width = pixel_values.shape[-2:]
width_values = torch.arange(width, device=pixel_values.device)
height_values = torch.arange(height, device=pixel_values.device)
x_emb = self.column_embeddings(width_values)
y_emb = self.row_embeddings(height_values)
pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
pos = pos.permute(2, 0, 1)
pos = pos.unsqueeze(0)
pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
return pos
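A quick shape check for the learned position embedding above; the import path follows this record's file_path and is an assumption.
```python
# Sketch: row and column embeddings are concatenated, so the channel dimension of the
# output is 2 * embedding_dim. Import path is an assumption (see file_path above).
import torch
from transformers.models.table_transformer.modeling_table_transformer import (
    TableTransformerLearnedPositionEmbedding,
)

pos_embed = TableTransformerLearnedPositionEmbedding(embedding_dim=128)
pixel_values = torch.randn(2, 3, 32, 40)  # (batch, channels, height, width), both dims <= 50

pos = pos_embed(pixel_values)
assert pos.shape == (2, 256, 32, 40)
```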
|
class TableTransformerLearnedPositionEmbedding(nn.Module):
'''
This module learns positional embeddings up to a fixed maximum size.
'''
def __init__(self, embedding_dim=256):
pass
def forward(self, pixel_values, pixel_mask=None):
pass
| 3
| 1
| 8
| 0
| 8
| 0
| 1
| 0.19
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 21
| 2
| 16
| 11
| 13
| 3
| 16
| 11
| 13
| 1
| 1
| 0
| 2
|
5,576
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerMLPPredictionHead
|
from torch import Tensor, nn
class TableTransformerMLPPredictionHead(nn.Module):
"""
Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
height and width of a bounding box w.r.t. an image.
Copied from https://github.com/facebookresearch/table_transformer/blob/master/models/table_transformer.py
"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList((nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
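A hedged usage sketch for the MLP head above; the dimensions are illustrative and the import path (per this record's file_path) is an assumption.
```python
# Sketch: the 3-layer MLP head maps decoder query embeddings (d_model) to 4 box values.
# Dimensions here are illustrative; the import path is an assumption (see file_path above).
import torch
from transformers.models.table_transformer.modeling_table_transformer import (
    TableTransformerMLPPredictionHead,
)

head = TableTransformerMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
queries = torch.randn(2, 15, 256)      # (batch, num_queries, d_model)
boxes = head(queries).sigmoid()        # normalized (center_x, center_y, width, height)
assert boxes.shape == (2, 15, 4)
```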
|
class TableTransformerMLPPredictionHead(nn.Module):
'''
Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
height and width of a bounding box w.r.t. an image.
Copied from https://github.com/facebookresearch/table_transformer/blob/master/models/table_transformer.py
'''
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
pass
def forward(self, x):
pass
| 3
| 1
| 5
| 0
| 5
| 0
| 2
| 0.5
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 19
| 4
| 10
| 7
| 7
| 5
| 10
| 7
| 7
| 3
| 1
| 1
| 4
|
5,577
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerModel
|
from torch import Tensor, nn
import torch
from .configuration_table_transformer import TableTransformerConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
from typing import Optional, Union
@auto_docstring(custom_intro='\n The bare Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) outputting raw\n hidden-states without any specific head on top.\n ')
class TableTransformerModel(TableTransformerPreTrainedModel):
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
backbone = TableTransformerConvEncoder(config)
object_queries = build_position_encoding(config)
self.backbone = TableTransformerConvModel(backbone, object_queries)
self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1)
self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
self.encoder = TableTransformerEncoder(config)
self.decoder = TableTransformerDecoder(config)
self.post_init()
def get_encoder(self):
return self.encoder
def freeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(False)
def unfreeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(True)
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], TableTransformerModelOutput]:
"""
decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
Examples:
```python
>>> from transformers import AutoImageProcessor, TableTransformerModel
>>> from huggingface_hub import hf_hub_download
>>> from PIL import Image
>>> file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png")
>>> image = Image.open(file_path).convert("RGB")
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
>>> model = TableTransformerModel.from_pretrained("microsoft/table-transformer-detection")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> # the last hidden states are the final query embeddings of the Transformer decoder
>>> # these are of shape (batch_size, num_queries, hidden_size)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 15, 256]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, num_channels, height, width = pixel_values.shape
device = pixel_values.device
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), device=device)
features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)
feature_map, mask = features[-1]
if mask is None:
raise ValueError('Backbone does not return downsampled pixel mask')
projected_feature_map = self.input_projection(feature_map)
flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
object_queries = position_embeddings_list[-1].flatten(2).permute(0, 2, 1)
flattened_mask = mask.flatten(1)
if encoder_outputs is None:
encoder_outputs = self.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
queries = torch.zeros_like(query_position_embeddings)
decoder_outputs = self.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
return decoder_outputs + encoder_outputs
return TableTransformerModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states)
| null | 8
| 1
| 25
| 4
| 15
| 6
| 3
| 0.41
| 1
| 10
| 7
| 0
| 6
| 5
| 6
| 7
| 160
| 30
| 92
| 39
| 72
| 38
| 46
| 27
| 39
| 11
| 2
| 1
| 18
|
5,578
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerObjectDetectionOutput
|
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
import torch
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Output type of [`TableTransformerForObjectDetection`].\n ')
class TableTransformerObjectDetectionOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~TableTransformerImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`TableTransformerForObjectDetection`].\n ')
class TableTransformerObjectDetectionOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~TableTransformerImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.46
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 2
| 13
| 13
| 12
| 45
| 13
| 13
| 12
| 0
| 1
| 0
| 0
|
5,579
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerPreTrainedModel
|
from .configuration_table_transformer import TableTransformerConfig
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
from torch import Tensor, nn
from ...modeling_utils import PreTrainedModel
@auto_docstring
class TableTransformerPreTrainedModel(PreTrainedModel):
config: TableTransformerConfig
base_model_prefix = 'model'
main_input_name = 'pixel_values'
_no_split_modules = ['TableTransformerConvEncoder', 'TableTransformerEncoderLayer', 'TableTransformerDecoderLayer']
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, TableTransformerLearnedPositionEmbedding):
nn.init.uniform_(module.row_embeddings.weight)
nn.init.uniform_(module.column_embeddings.weight)
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
@auto_docstring
class TableTransformerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 16
| 1
| 13
| 2
| 6
| 0.09
| 1
| 1
| 1
| 4
| 1
| 0
| 1
| 1
| 26
| 2
| 22
| 7
| 20
| 2
| 17
| 7
| 15
| 6
| 1
| 2
| 6
|
5,580
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/table_transformer/modeling_table_transformer.py
|
transformers.models.table_transformer.modeling_table_transformer.TableTransformerSinePositionEmbedding
|
import torch
import math
from torch import Tensor, nn
class TableTransformerSinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.embedding_dim = embedding_dim
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError('normalize should be True if scale is passed')
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, pixel_values, pixel_mask):
if pixel_mask is None:
raise ValueError('No pixel mask provided')
y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
y_embed = y_embed / (y_embed[:, -1:, :] + 1e-06) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + 1e-06) * self.scale
dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
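A hedged shape check for the sine position embedding above, using an all-ones pixel mask; the import path (per this record's file_path) is an assumption.
```python
# Sketch: the sine position embedding requires a pixel mask; an all-ones mask is used here.
# Import path is an assumption (see file_path above).
import torch
from transformers.models.table_transformer.modeling_table_transformer import (
    TableTransformerSinePositionEmbedding,
)

pos_embed = TableTransformerSinePositionEmbedding(embedding_dim=128, normalize=True)
pixel_values = torch.randn(1, 3, 16, 20)
pixel_mask = torch.ones(1, 16, 20)

pos = pos_embed(pixel_values, pixel_mask)
# sin/cos features for y and x are concatenated: 2 * embedding_dim channels
assert pos.shape == (1, 256, 16, 20)
```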
|
class TableTransformerSinePositionEmbedding(nn.Module):
'''
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
'''
def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
pass
def forward(self, pixel_values, pixel_mask):
pass
| 3
| 1
| 14
| 1
| 13
| 0
| 3
| 0.15
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 35
| 4
| 27
| 13
| 24
| 4
| 27
| 13
| 24
| 3
| 1
| 1
| 6
|
5,581
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/configuration_tapas.py
|
transformers.models.tapas.configuration_tapas.TapasConfig
|
from ...configuration_utils import PretrainedConfig
class TapasConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`TapasModel`]. It is used to instantiate a TAPAS
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the TAPAS
[google/tapas-base-finetuned-sqa](https://huggingface.co/google/tapas-base-finetuned-sqa) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Hyperparameters additional to BERT are taken from run_task_main.py and hparam_utils.py of the original
implementation. Original implementation available at https://github.com/google-research/tapas/tree/master.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the TAPAS model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`TapasModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"swish"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_sizes (`list[int]`, *optional*, defaults to `[3, 256, 256, 2, 256, 256, 10]`):
The vocabulary sizes of the `token_type_ids` passed when calling [`TapasModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
positive_label_weight (`float`, *optional*, defaults to 10.0):
Weight for positive labels.
num_aggregation_labels (`int`, *optional*, defaults to 0):
The number of aggregation operators to predict.
aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
Importance weight for the aggregation loss.
use_answer_as_supervision (`bool`, *optional*):
Whether to use the answer as the only supervision for aggregation examples.
answer_loss_importance (`float`, *optional*, defaults to 1.0):
Importance weight for the regression loss.
use_normalized_answer_loss (`bool`, *optional*, defaults to `False`):
Whether to normalize the answer loss by the maximum of the predicted and expected value.
huber_loss_delta (`float`, *optional*):
Delta parameter used to calculate the regression loss.
temperature (`float`, *optional*, defaults to 1.0):
Value used to control (OR change) the skewness of cell logits probabilities.
aggregation_temperature (`float`, *optional*, defaults to 1.0):
Scales aggregation logits to control the skewness of probabilities.
use_gumbel_for_cells (`bool`, *optional*, defaults to `False`):
Whether to apply Gumbel-Softmax to cell selection.
use_gumbel_for_aggregation (`bool`, *optional*, defaults to `False`):
Whether to apply Gumbel-Softmax to aggregation selection.
average_approximation_function (`string`, *optional*, defaults to `"ratio"`):
Method to calculate the expected average of cells in the weak supervision case. One of `"ratio"`,
`"first_order"` or `"second_order"`.
cell_selection_preference (`float`, *optional*):
Preference for cell selection in ambiguous cases. Only applicable in case of weak supervision for
aggregation (WTQ, WikiSQL). If the total mass of the aggregation probabilities (excluding the "NONE"
operator) is higher than this hyperparameter, then aggregation is predicted for an example.
answer_loss_cutoff (`float`, *optional*):
Ignore examples with answer loss larger than cutoff.
max_num_rows (`int`, *optional*, defaults to 64):
Maximum number of rows.
max_num_columns (`int`, *optional*, defaults to 32):
Maximum number of columns.
average_logits_per_cell (`bool`, *optional*, defaults to `False`):
Whether to average logits per cell.
select_one_column (`bool`, *optional*, defaults to `True`):
Whether to constrain the model to only select cells from a single column.
allow_empty_column_selection (`bool`, *optional*, defaults to `False`):
Whether to allow not to select any column.
init_cell_selection_weights_to_zero (`bool`, *optional*, defaults to `False`):
Whether to initialize cell selection weights to 0 so that the initial probabilities are 50%.
reset_position_index_per_cell (`bool`, *optional*, defaults to `True`):
Whether to restart position indexes at every cell (i.e. use relative position embeddings).
disable_per_token_loss (`bool`, *optional*, defaults to `False`):
Whether to disable any (strong or weak) supervision on cells.
aggregation_labels (`dict[int, label]`, *optional*):
The aggregation labels used to aggregate the results. For example, the WTQ models have the following
aggregation labels: `{0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}`
no_aggregation_label_index (`int`, *optional*):
If the aggregation labels are defined and one of these labels represents "No aggregation", this should be
set to its index. For example, the WTQ models have the "NONE" aggregation label at index 0, so that value
should be set to 0 for these models.
Example:
```python
>>> from transformers import TapasModel, TapasConfig
>>> # Initializing a default (SQA) Tapas configuration
>>> configuration = TapasConfig()
>>> # Initializing a model from the configuration
>>> model = TapasModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'tapas'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function='ratio', cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_sizes = type_vocab_sizes
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.positive_label_weight = positive_label_weight
self.num_aggregation_labels = num_aggregation_labels
self.aggregation_loss_weight = aggregation_loss_weight
self.use_answer_as_supervision = use_answer_as_supervision
self.answer_loss_importance = answer_loss_importance
self.use_normalized_answer_loss = use_normalized_answer_loss
self.huber_loss_delta = huber_loss_delta
self.temperature = temperature
self.aggregation_temperature = aggregation_temperature
self.use_gumbel_for_cells = use_gumbel_for_cells
self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
self.average_approximation_function = average_approximation_function
self.cell_selection_preference = cell_selection_preference
self.answer_loss_cutoff = answer_loss_cutoff
self.max_num_rows = max_num_rows
self.max_num_columns = max_num_columns
self.average_logits_per_cell = average_logits_per_cell
self.select_one_column = select_one_column
self.allow_empty_column_selection = allow_empty_column_selection
self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
self.reset_position_index_per_cell = reset_position_index_per_cell
self.disable_per_token_loss = disable_per_token_loss
self.aggregation_labels = aggregation_labels
self.no_aggregation_label_index = no_aggregation_label_index
if isinstance(self.aggregation_labels, dict):
self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
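Complementing the SQA example in the docstring above, a hedged sketch of a weak-supervision style configuration that exercises the aggregation fields; the values are illustrative, not those of a released checkpoint.
```python
# Sketch: a weak-supervision (WTQ-style) configuration using the aggregation fields
# documented above. The numeric values are illustrative, not a released checkpoint's.
from transformers import TapasConfig

config = TapasConfig(
    num_aggregation_labels=4,
    aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
    no_aggregation_label_index=0,
    use_answer_as_supervision=True,
    cell_selection_preference=0.2,
)

print(config.aggregation_labels[1])  # "SUM" (dict keys are normalized to int in __init__)
```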
|
class TapasConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`TapasModel`]. It is used to instantiate a TAPAS
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the TAPAS
[google/tapas-base-finetuned-sqa](https://huggingface.co/google/tapas-base-finetuned-sqa) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Hyperparameters additional to BERT are taken from run_task_main.py and hparam_utils.py of the original
implementation. Original implementation available at https://github.com/google-research/tapas/tree/master.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the TAPAS model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`TapasModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"swish"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_sizes (`list[int]`, *optional*, defaults to `[3, 256, 256, 2, 256, 256, 10]`):
The vocabulary sizes of the `token_type_ids` passed when calling [`TapasModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
positive_label_weight (`float`, *optional*, defaults to 10.0):
Weight for positive labels.
num_aggregation_labels (`int`, *optional*, defaults to 0):
The number of aggregation operators to predict.
aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
Importance weight for the aggregation loss.
use_answer_as_supervision (`bool`, *optional*):
Whether to use the answer as the only supervision for aggregation examples.
answer_loss_importance (`float`, *optional*, defaults to 1.0):
Importance weight for the regression loss.
use_normalized_answer_loss (`bool`, *optional*, defaults to `False`):
Whether to normalize the answer loss by the maximum of the predicted and expected value.
huber_loss_delta (`float`, *optional*):
Delta parameter used to calculate the regression loss.
temperature (`float`, *optional*, defaults to 1.0):
Value used to control (OR change) the skewness of cell logits probabilities.
aggregation_temperature (`float`, *optional*, defaults to 1.0):
Scales aggregation logits to control the skewness of probabilities.
use_gumbel_for_cells (`bool`, *optional*, defaults to `False`):
Whether to apply Gumbel-Softmax to cell selection.
use_gumbel_for_aggregation (`bool`, *optional*, defaults to `False`):
Whether to apply Gumbel-Softmax to aggregation selection.
average_approximation_function (`string`, *optional*, defaults to `"ratio"`):
Method to calculate the expected average of cells in the weak supervision case. One of `"ratio"`,
`"first_order"` or `"second_order"`.
cell_selection_preference (`float`, *optional*):
Preference for cell selection in ambiguous cases. Only applicable in case of weak supervision for
aggregation (WTQ, WikiSQL). If the total mass of the aggregation probabilities (excluding the "NONE"
operator) is higher than this hyperparameter, then aggregation is predicted for an example.
answer_loss_cutoff (`float`, *optional*):
Ignore examples with answer loss larger than cutoff.
max_num_rows (`int`, *optional*, defaults to 64):
Maximum number of rows.
max_num_columns (`int`, *optional*, defaults to 32):
Maximum number of columns.
average_logits_per_cell (`bool`, *optional*, defaults to `False`):
Whether to average logits per cell.
select_one_column (`bool`, *optional*, defaults to `True`):
Whether to constrain the model to only select cells from a single column.
allow_empty_column_selection (`bool`, *optional*, defaults to `False`):
Whether to allow not to select any column.
init_cell_selection_weights_to_zero (`bool`, *optional*, defaults to `False`):
Whether to initialize cell selection weights to 0 so that the initial probabilities are 50%.
reset_position_index_per_cell (`bool`, *optional*, defaults to `True`):
Whether to restart position indexes at every cell (i.e. use relative position embeddings).
disable_per_token_loss (`bool`, *optional*, defaults to `False`):
Whether to disable any (strong or weak) supervision on cells.
aggregation_labels (`dict[int, label]`, *optional*):
The aggregation labels used to aggregate the results. For example, the WTQ models have the following
aggregation labels: `{0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}`
no_aggregation_label_index (`int`, *optional*):
If the aggregation labels are defined and one of these labels represents "No aggregation", this should be
set to its index. For example, the WTQ models have the "NONE" aggregation label at index 0, so that value
should be set to 0 for these models.
Example:
```python
>>> from transformers import TapasModel, TapasConfig
>>> # Initializing a default (SQA) Tapas configuration
>>> configuration = TapasConfig()
>>> # Initializing a model from the configuration
>>> model = TapasModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function='ratio', cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
pass
| 2
| 1
| 87
| 4
| 80
| 3
| 2
| 1.27
| 1
| 3
| 0
| 0
| 1
| 36
| 1
| 1
| 199
| 13
| 82
| 79
| 40
| 104
| 42
| 39
| 40
| 2
| 1
| 1
| 2
|
5,582
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.AverageApproximationFunction
|
import enum
class AverageApproximationFunction(str, enum.Enum):
RATIO = 'ratio'
FIRST_ORDER = 'first_order'
SECOND_ORDER = 'second_order'
|
class AverageApproximationFunction(str, enum.Enum):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 4
| 0
| 4
| 4
| 3
| 0
| 4
| 4
| 3
| 0
| 4
| 0
| 0
|
5,583
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.IndexMap
|
import torch
class IndexMap:
"""Index grouping entries within a tensor."""
def __init__(self, indices, num_segments, batch_dims=0):
"""
Creates an index
Args:
indices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer):
Tensor containing the indices.
num_segments (`torch.LongTensor`):
Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same
number of segments (although many segments can be empty).
batch_dims (`int`, *optional*, defaults to 0):
The number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as
batch dimensions. Segments in different batch elements are always distinct even if they have the same
index.
"""
self.indices = torch.as_tensor(indices, device=indices.device)
self.num_segments = torch.as_tensor(num_segments, device=indices.device)
self.batch_dims = batch_dims
def batch_shape(self):
return self.indices.size()[:self.batch_dims]
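A hedged sketch of building an IndexMap over a small table; importing it from modeling_tapas (per this record's file_path) is an assumption.
```python
# Sketch: grouping 6 tokens of one batch element into 3 column segments.
# Importing IndexMap from modeling_tapas is an assumption (see file_path above).
import torch
from transformers.models.tapas.modeling_tapas import IndexMap

column_ids = torch.tensor([[0, 0, 1, 1, 2, 2]])  # (batch, seq_len)
col_index = IndexMap(indices=column_ids, num_segments=3, batch_dims=1)

print(col_index.batch_shape())  # torch.Size([1]) -- the leading batch dimension
```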
|
class IndexMap:
'''Index grouping entries within a tensor.'''
def __init__(self, indices, num_segments, batch_dims=0):
'''
Creates an index
Args:
indices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer):
Tensor containing the indices.
num_segments (`torch.LongTensor`):
Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same
number of segments (although many segments can be empty).
batch_dims (`int`, *optional*, defaults to 0):
The number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as
batch dimensions. Segments in different batch elements are always distinct even if they have the same
index.
'''
pass
def batch_shape(self):
pass
| 3
| 2
| 10
| 1
| 3
| 7
| 1
| 2.14
| 0
| 0
| 0
| 1
| 2
| 3
| 2
| 2
| 24
| 3
| 7
| 6
| 4
| 15
| 7
| 6
| 4
| 1
| 0
| 0
| 2
|
5,584
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.ProductIndexMap
|
import torch
class ProductIndexMap(IndexMap):
"""The product of two indices."""
def __init__(self, outer_index, inner_index):
"""
Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to
*outer_index.num_segments* * *inner_index.num_segments*
Args:
outer_index (`IndexMap`):
IndexMap.
inner_index (`IndexMap`):
IndexMap, must have the same shape as *outer_index*.
"""
if outer_index.batch_dims != inner_index.batch_dims:
raise ValueError('outer_index.batch_dims and inner_index.batch_dims must be the same.')
super().__init__(indices=inner_index.indices + outer_index.indices * inner_index.num_segments, num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims)
self.outer_index = outer_index
self.inner_index = inner_index
def project_outer(self, index):
"""Projects an index with the same index set onto the outer components."""
indices = torch.div(index.indices, self.inner_index.num_segments, rounding_mode='floor').type(torch.long)
return IndexMap(indices=indices, num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims)
def project_inner(self, index):
"""Projects an index with the same index set onto the inner components."""
return IndexMap(indices=torch.fmod(index.indices, self.inner_index.num_segments).type(torch.float).floor().type(torch.long), num_segments=self.inner_index.num_segments, batch_dims=index.batch_dims)
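A hedged sketch of the cell index described in the docstring above, combining row and column indices; the import path is an assumption.
```python
# Sketch: combining row and column indices into per-cell segments, as the docstring
# above describes. Import path is an assumption (see file_path above).
import torch
from transformers.models.tapas.modeling_tapas import IndexMap, ProductIndexMap

column_ids = torch.tensor([[0, 0, 1, 1]])
row_ids = torch.tensor([[0, 1, 0, 1]])

col_index = IndexMap(column_ids, num_segments=2, batch_dims=1)
row_index = IndexMap(row_ids, num_segments=2, batch_dims=1)

cell_index = ProductIndexMap(col_index, row_index)
print(cell_index.num_segments)                       # tensor(4): 2 columns * 2 rows
print(cell_index.indices)                            # tensor([[0, 1, 2, 3]])
print(cell_index.project_outer(cell_index).indices)  # tensor([[0, 0, 1, 1]]): the column ids again
```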
|
class ProductIndexMap(IndexMap):
'''The product of two indices.'''
def __init__(self, outer_index, inner_index):
'''
Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to
*outer_index.num_segments* * *inner_index.num_segments*
Args:
outer_index (`IndexMap`):
IndexMap.
inner_index (`IndexMap`):
IndexMap, must have the same shape as *outer_index*.
'''
pass
def project_outer(self, index):
'''Projects an index with the same index set onto the outer components.'''
pass
def project_inner(self, index):
'''Projects an index with the same index set onto the inner components.'''
pass
| 4
| 4
| 13
| 1
| 7
| 5
| 1
| 0.65
| 1
| 3
| 0
| 0
| 3
| 2
| 3
| 5
| 43
| 5
| 23
| 7
| 19
| 15
| 12
| 7
| 8
| 2
| 1
| 1
| 4
|
5,585
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TableQuestionAnsweringOutput
|
from dataclasses import dataclass
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
@dataclass
@auto_docstring(custom_intro='\n Output type of [`TapasForQuestionAnswering`].\n ')
class TableQuestionAnsweringOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)):
Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the
semi-supervised regression loss and (optionally) supervised loss for aggregations.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Prediction scores of the cell selection head, for every token.
logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`):
Prediction scores of the aggregation head, for every aggregation operator.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
logits_aggregation: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`TapasForQuestionAnswering`].\n ')
class TableQuestionAnsweringOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)):
Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the
semi-supervised regression loss and (optionally) supervised loss for aggregations.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Prediction scores of the cell selection head, for every token.
logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`):
Prediction scores of the aggregation head, for every aggregation operator.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.17
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 2
| 6
| 6
| 5
| 19
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
5,586
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasAttention
|
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch import nn
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class TapasAttention(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.self = TapasSelfAttention(config, layer_idx=layer_idx)
self.output = TapasSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
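A hedged sketch of head pruning with the helper above; constructing TapasAttention directly from a default TapasConfig is for illustration only.
```python
# Sketch: pruning attention heads with prune_heads. Building TapasAttention from a
# default TapasConfig is an assumption for illustration, not a documented entry point.
from transformers import TapasConfig
from transformers.models.tapas.modeling_tapas import TapasAttention

config = TapasConfig()                 # hidden_size=768, num_attention_heads=12
attention = TapasAttention(config)

attention.prune_heads([0, 2])          # drop heads 0 and 2
print(attention.self.num_attention_heads)  # 10
print(attention.pruned_heads)              # {0, 2}
```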
|
class TapasAttention(nn.Module):
def __init__(self, config, layer_idx=None):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 15
| 1
| 13
| 1
| 1
| 0.12
| 1
| 6
| 2
| 0
| 3
| 3
| 3
| 13
| 49
| 4
| 41
| 20
| 28
| 5
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
5,587
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasEmbeddings
|
import torch
from torch import nn
class TapasEmbeddings(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of
additional token type embeddings to encode tabular structure.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
for i, type_vocab_sizes in enumerate(config.type_vocab_sizes):
name = f'token_type_embeddings_{i}'
setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size))
self.number_of_token_type_embeddings = len(config.type_vocab_sizes)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if self.config.reset_position_index_per_cell:
col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1)
row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1)
full_index = ProductIndexMap(col_index, row_index)
first_position_per_segment = reduce_min(position_ids, full_index)[0]
first_position = gather(first_position_per_segment, full_index)
position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0)
position_ids = torch.min(torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape + self.number_of_token_type_embeddings, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = inputs_embeds + position_embeddings
for i in range(self.number_of_token_type_embeddings):
name = f'token_type_embeddings_{i}'
embeddings += getattr(self, name)(token_type_ids[:, :, i])
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
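A hedged shape check for the embeddings above; the all-zero ids are placeholders and the import path (per this record's file_path) is an assumption.
```python
# Sketch: the embeddings expect token_type_ids with 7 id types (one per entry in
# config.type_vocab_sizes). All-zero ids are used purely for a shape check.
# Import path is an assumption (see file_path above).
import torch
from transformers import TapasConfig
from transformers.models.tapas.modeling_tapas import TapasEmbeddings

config = TapasConfig()  # hidden_size=768, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10]
embeddings = TapasEmbeddings(config)

input_ids = torch.zeros(1, 10, dtype=torch.long)
token_type_ids = torch.zeros(1, 10, 7, dtype=torch.long)

output = embeddings(input_ids=input_ids, token_type_ids=token_type_ids)
assert output.shape == (1, 10, 768)
```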
|
class TapasEmbeddings(nn.Module):
'''
Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of
additional token type embeddings to encode tabular structure.
'''
def __init__(self, config):
pass
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
pass
| 3
| 1
| 35
| 6
| 23
| 7
| 5
| 0.39
| 1
| 5
| 2
| 0
| 2
| 6
| 2
| 12
| 77
| 13
| 46
| 24
| 43
| 18
| 41
| 24
| 38
| 8
| 1
| 2
| 10
|
5,588
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasEncoder
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput
from torch import nn
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
class TapasEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([TapasLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, cache_position=None):
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)
|
class TapasEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, cache_position=None):
pass
| 3
| 0
| 30
| 2
| 28
| 0
| 6
| 0
| 1
| 6
| 2
| 0
| 2
| 3
| 2
| 12
| 61
| 5
| 56
| 23
| 41
| 0
| 24
| 11
| 21
| 10
| 1
| 2
| 11
|
5,589
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasForMaskedLM
|
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from .configuration_tapas import TapasConfig
import torch
@auto_docstring
class TapasForMaskedLM(TapasPreTrainedModel):
_tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias']
config: TapasConfig
base_model_prefix = 'tapas'
def __init__(self, config):
super().__init__(config)
self.tapas = TapasModel(config, add_pooling_layer=False)
self.cls = TapasOnlyMLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, MaskedLMOutput]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, TapasForMaskedLM
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
>>> model = TapasForMaskedLM.from_pretrained("google/tapas-base")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> inputs = tokenizer(
... table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt"
... )
>>> labels = tokenizer(
... table=table, queries="How many movies has George Clooney played in?", return_tensors="pt"
... )["input_ids"]
>>> outputs = model(**inputs, labels=labels)
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.tapas(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class TapasForMaskedLM(TapasPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, MaskedLMOutput]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, TapasForMaskedLM
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
>>> model = TapasForMaskedLM.from_pretrained("google/tapas-base")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> inputs = tokenizer(
... table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt"
... )
>>> labels = tokenizer(
... table=table, queries="How many movies has George Clooney played in?", return_tensors="pt"
... )["input_ids"]
>>> outputs = model(**inputs, labels=labels)
>>> logits = outputs.logits
```'''
pass
| 7
| 1
| 24
| 4
| 14
| 7
| 2
| 0.48
| 1
| 5
| 3
| 0
| 4
| 2
| 4
| 5
| 107
| 18
| 61
| 32
| 39
| 29
| 27
| 16
| 22
| 5
| 2
| 1
| 8
|
5,590
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasForQuestionAnswering
|
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
from torch import nn
from .configuration_tapas import TapasConfig
@auto_docstring(custom_intro='\n Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables\n (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for\n SQA, WTQ or WikiSQL-supervised tasks.\n ')
class TapasForQuestionAnswering(TapasPreTrainedModel):
def __init__(self, config: TapasConfig):
super().__init__(config)
self.tapas = TapasModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if config.init_cell_selection_weights_to_zero:
self.output_weights = nn.Parameter(torch.zeros(config.hidden_size))
self.column_output_weights = nn.Parameter(torch.zeros(config.hidden_size))
else:
self.output_weights = nn.Parameter(torch.empty(config.hidden_size))
nn.init.normal_(self.output_weights, std=config.initializer_range)
self.column_output_weights = nn.Parameter(torch.empty(config.hidden_size))
nn.init.normal_(self.column_output_weights, std=config.initializer_range)
self.output_bias = nn.Parameter(torch.zeros([]))
self.column_output_bias = nn.Parameter(torch.zeros([]))
if config.num_aggregation_labels > 0:
self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels)
self.post_init()
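# Editorial note: with init_cell_selection_weights_to_zero=True (above), the selection weights and biases
# start at 0, so every initial token/column selection logit is 0 and the initial cell-selection
# probability is sigmoid(0) = 0.5; see `init_cell_selection_weights_to_zero` in `TapasConfig`.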
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, table_mask: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, aggregation_labels: Optional[torch.LongTensor]=None, float_answer: Optional[torch.FloatTensor]=None, numeric_values: Optional[torch.FloatTensor]=None, numeric_values_scale: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TableQuestionAnsweringOutput]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
table_mask (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and
padding are 0.
labels (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the
answer appearing in the table. Can be obtained using [`AutoTokenizer`].
- 1 for tokens that are **part of the answer**,
- 0 for tokens that are **not part of the answer**.
aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
Aggregation function index for every example in the batch for computing the aggregation loss. Indices
should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for
aggregation (WikiSQL-supervised).
float_answer (`torch.FloatTensor` of shape `(batch_size, )`, *optional*):
Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only
required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.
numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*):
Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using
[`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the
regression loss.
numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*):
Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. Only required in case
of weak supervision for aggregation (WTQ) to calculate the regression loss.
Examples:
```python
>>> from transformers import AutoTokenizer, TapasForQuestionAnswering
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
>>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
>>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> logits_aggregation = outputs.logits_aggregation
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.tapas(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
pooled_output = outputs[1]
sequence_output = self.dropout(sequence_output)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is None:
token_type_ids = torch.zeros((*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device)
token_types = ['segment_ids', 'column_ids', 'row_ids', 'prev_labels', 'column_ranks', 'inv_column_ranks', 'numeric_relations']
row_ids = token_type_ids[:, :, token_types.index('row_ids')]
column_ids = token_type_ids[:, :, token_types.index('column_ids')]
row_index = IndexMap(indices=torch.min(row_ids, torch.as_tensor(self.config.max_num_rows - 1, device=row_ids.device)), num_segments=self.config.max_num_rows, batch_dims=1)
col_index = IndexMap(indices=torch.min(column_ids, torch.as_tensor(self.config.max_num_columns - 1, device=column_ids.device)), num_segments=self.config.max_num_columns, batch_dims=1)
cell_index = ProductIndexMap(row_index, col_index)
input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if table_mask is None:
table_mask = torch.where(row_ids > 0, torch.ones_like(row_ids), torch.zeros_like(row_ids))
input_mask_float = attention_mask.to(device=device, dtype=torch.float)
table_mask_float = table_mask.to(device=device, dtype=torch.float)
cell_mask, _ = reduce_mean(input_mask_float, cell_index)
logits = compute_token_logits(sequence_output, self.config.temperature, self.output_weights, self.output_bias)
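# Editorial note: compute_token_logits (defined elsewhere in this file) essentially computes
# (sequence_output @ output_weights + output_bias) / temperature, i.e. one cell-selection logit per token.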
column_logits = None
if self.config.select_one_column:
column_logits = compute_column_logits(sequence_output, self.column_output_weights, self.column_output_bias, cell_index, cell_mask, self.config.allow_empty_column_selection)
logits_aggregation = None
if self.config.num_aggregation_labels > 0:
logits_aggregation = self.aggregation_classifier(pooled_output)
total_loss = 0.0
calculate_loss = False
if labels is not None:
calculate_loss = True
is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision
if is_supervised:
aggregate_mask = None
elif float_answer is not None:
assert labels.shape[0] == float_answer.shape[0], 'Make sure the answers are a FloatTensor of shape (batch_size,)'
aggregate_mask = _calculate_aggregate_mask(float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier)
else:
raise ValueError('You have to specify float answers in order to calculate the aggregate mask')
if self.config.average_logits_per_cell:
logits_per_cell, _ = reduce_mean(logits, cell_index)
logits = gather(logits_per_cell, cell_index)
dist_per_token = torch.distributions.Bernoulli(logits=logits)
selection_loss_per_example = None
if not self.config.select_one_column:
weight = torch.where(labels == 0, torch.ones_like(labels, dtype=torch.float32), self.config.positive_label_weight * torch.ones_like(labels, dtype=torch.float32))
selection_loss_per_token = -dist_per_token.log_prob(labels) * weight
selection_loss_per_example = torch.sum(selection_loss_per_token * input_mask_float, dim=1) / (torch.sum(input_mask_float, dim=1) + EPSILON_ZERO_DIVISION)
else:
selection_loss_per_example, logits = _single_column_cell_selection_loss(logits, column_logits, labels, cell_index, col_index, cell_mask)
dist_per_token = torch.distributions.Bernoulli(logits=logits)
if self.config.disable_per_token_loss:
pass
elif is_supervised:
total_loss += torch.mean(selection_loss_per_example)
else:
total_loss += torch.mean(selection_loss_per_example * (1.0 - aggregate_mask))
if self.config.num_aggregation_labels > 0:
if is_supervised:
if aggregation_labels is not None:
assert labels.shape[0] == aggregation_labels.shape[0], 'Make sure the aggregation labels are a LongTensor of shape (batch_size,)'
per_example_additional_loss = _calculate_aggregation_loss(logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight)
else:
raise ValueError('You have to specify aggregation labels in order to calculate the aggregation loss')
else:
aggregation_labels = torch.zeros(labels.shape[0], dtype=torch.long, device=labels.device)
per_example_additional_loss = _calculate_aggregation_loss(logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight)
if self.config.use_answer_as_supervision:
if numeric_values is not None and numeric_values_scale is not None:
assert numeric_values.shape == numeric_values_scale.shape
answer_loss, large_answer_loss_mask = _calculate_regression_loss(float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config)
per_example_additional_loss += answer_loss
per_example_additional_loss *= large_answer_loss_mask
else:
raise ValueError('You have to specify numeric values and numeric values scale in order to calculate the regression loss')
total_loss += torch.mean(per_example_additional_loss)
else:
labels = torch.zeros_like(logits)
_, logits = _single_column_cell_selection_loss(logits, column_logits, labels, cell_index, col_index, cell_mask)
if not return_dict:
output = (logits, logits_aggregation) + outputs[2:]
return (total_loss,) + output if calculate_loss else output
return TableQuestionAnsweringOutput(loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables\n (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for\n SQA, WTQ or WikiSQL-supervised tasks.\n ')
class TapasForQuestionAnswering(TapasPreTrainedModel):
def __init__(self, config: TapasConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, table_mask: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, aggregation_labels: Optional[torch.LongTensor]=None, float_answer: Optional[torch.FloatTensor]=None, numeric_values: Optional[torch.FloatTensor]=None, numeric_values_scale: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TableQuestionAnsweringOutput]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
table_mask (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and
padding are 0.
labels (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the
answer appearing in the table. Can be obtained using [`AutoTokenizer`].
- 1 for tokens that are **part of the answer**,
- 0 for tokens that are **not part of the answer**.
aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
Aggregation function index for every example in the batch for computing the aggregation loss. Indices
should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for
aggregation (WikiSQL-supervised).
float_answer (`torch.FloatTensor` of shape `(batch_size, )`, *optional*):
Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only
required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.
numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*):
Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using
[`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the
regression loss.
numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*):
Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. Only required in case
of weak supervision for aggregation (WTQ) to calculate the regression loss.
Examples:
```python
>>> from transformers import AutoTokenizer, TapasForQuestionAnswering
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
>>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
>>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> logits_aggregation = outputs.logits_aggregation
```'''
pass
| 5
| 1
| 166
| 19
| 110
| 39
| 15
| 0.35
| 1
| 9
| 5
| 0
| 2
| 7
| 2
| 3
| 336
| 38
| 222
| 57
| 200
| 78
| 99
| 39
| 96
| 26
| 2
| 4
| 29
|
5,591
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasForSequenceClassification
|
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
@auto_docstring(custom_intro='\n Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table\n entailment tasks, such as TabFact (Chen et al., 2020).\n ')
class TapasForSequenceClassification(TapasPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.tapas = TapasModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called
"classification_class_index" in the original implementation.
Examples:
```python
>>> from transformers import AutoTokenizer, TapasForSequenceClassification
>>> import torch
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact")
>>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> queries = [
... "There is only one actor who is 45 years old",
... "There are 3 actors which played in more than 60 movies",
... ]
>>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
>>> labels = torch.tensor([1, 0]) # 1 means entailed, 0 means refuted
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.tapas(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table\n entailment tasks, such as TabFact (Chen et al., 2020).\n ')
class TapasForSequenceClassification(TapasPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called
"classification_class_index" in the original implementation.
Examples:
```python
>>> from transformers import AutoTokenizer, TapasForSequenceClassification
>>> import torch
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact")
>>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> queries = [
... "There is only one actor who is 45 years old",
... "There are 3 actors which played in more than 60 movies",
... ]
>>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
>>> labels = torch.tensor([1, 0]) # 1 means entailed, 0 means refuted
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
```'''
pass
| 5
| 1
| 56
| 8
| 33
| 16
| 7
| 0.46
| 1
| 6
| 2
| 0
| 2
| 4
| 2
| 3
| 115
| 16
| 68
| 26
| 51
| 31
| 34
| 13
| 31
| 12
| 2
| 3
| 13
|
5,592
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasIntermediate
|
from ...activations import ACT2FN
import torch
from torch import nn
class TapasIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class TapasIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
5,593
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasLMPredictionHead
|
import torch
from torch import nn
class TapasLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = TapasPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def _tie_weights(self):
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
|
class TapasLMPredictionHead(nn.Module):
def __init__(self, config):
pass
def _tie_weights(self):
pass
def forward(self, hidden_states):
pass
| 4
| 0
| 6
| 1
| 4
| 1
| 1
| 0.23
| 1
| 2
| 1
| 0
| 3
| 3
| 3
| 13
| 21
| 5
| 13
| 7
| 9
| 3
| 13
| 7
| 9
| 1
| 1
| 0
| 3
|
5,594
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasLayer
|
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class TapasLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = TapasAttention(config, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = TapasAttention(config, layer_idx=layer_idx)
self.intermediate = TapasIntermediate(config)
self.output = TapasOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, past_key_values=past_key_values, cache_position=cache_position)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_outputs = self.crossattention(attention_output, attention_mask=encoder_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
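# Editorial note: apply_chunking_to_forward (above) splits attention_output along the sequence dimension
# into chunks of config.chunk_size_feed_forward and applies feed_forward_chunk to each chunk before
# concatenating the results; a chunk size of 0 disables chunking. This lowers peak memory at the cost of
# extra kernel launches.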
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class TapasLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4
| 0
| 27
| 2
| 23
| 2
| 4
| 0.13
| 1
| 7
| 3
| 0
| 3
| 8
| 3
| 13
| 86
| 9
| 70
| 32
| 57
| 9
| 41
| 23
| 37
| 7
| 1
| 2
| 11
|
5,595
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasModel
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput
from ...utils import ModelOutput, auto_docstring, logging
import torch
from typing import Optional, Union
@auto_docstring
class TapasModel(TapasPreTrainedModel):
"""
This class is a small change compared to [`BertModel`], taking into account the additional token type ids.
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
"""
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = TapasEmbeddings(config)
self.encoder = TapasEncoder(config)
self.pooler = TapasPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
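# Hypothetical usage of the method above (illustrative only, not part of the original file):
#     model._prune_heads({0: [0, 1], 5: [2]})  # prune heads 0 and 1 of layer 0 and head 2 of layer 5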
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
Examples:
```python
>>> from transformers import AutoTokenizer, TapasModel
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
>>> model = TapasModel.from_pretrained("google/tapas-base")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
>>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros((*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class TapasModel(TapasPreTrainedModel):
'''
This class is a small change compared to [`BertModel`], taking into account the additional token type ids.
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
'''
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
Examples:
```python
>>> from transformers import AutoTokenizer, TapasModel
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
>>> model = TapasModel.from_pretrained("google/tapas-base")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
>>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
| 8
| 4
| 27
| 4
| 16
| 7
| 4
| 0.47
| 1
| 8
| 4
| 0
| 5
| 4
| 5
| 6
| 150
| 25
| 85
| 35
| 64
| 40
| 46
| 21
| 40
| 14
| 2
| 2
| 20
|
5,596
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasOnlyMLMHead
|
import torch
from torch import nn
class TapasOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = TapasLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
class TapasOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
5,597
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasOutput
|
import torch
from torch import nn
class TapasOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class TapasOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
5,598
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasPooler
|
import torch
from torch import nn
class TapasPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class TapasPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
5,599
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/tapas/modeling_tapas.py
|
transformers.models.tapas.modeling_tapas.TapasPreTrainedModel
|
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_utils import PreTrainedModel
from torch import nn
from .configuration_tapas import TapasConfig
@auto_docstring
class TapasPreTrainedModel(PreTrainedModel):
config: TapasConfig
base_model_prefix = 'tapas'
supports_gradient_checkpointing = True
_supports_param_buffer_assignment = False
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, TapasLMPredictionHead):
module.bias.data.zero_()
|
@auto_docstring
class TapasPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 15
| 0
| 12
| 3
| 6
| 0.47
| 1
| 0
| 0
| 4
| 1
| 0
| 1
| 1
| 27
| 2
| 17
| 6
| 15
| 8
| 15
| 6
| 13
| 6
| 1
| 2
| 6
|