Dataset schema, one entry per column (for string columns the min/max describe string length; for numeric columns they are the observed value range):

| Column | dtype | Min | Max |
| --- | --- | --- | --- |
| `id` | int64 | 0 | 328k |
| `repository_name` | string (length) | 7 | 58 |
| `file_path` | string (length) | 9 | 302 |
| `class_name` | string (length) | 5 | 256 |
| `human_written_code` | string (length) | 16 | 2.16M |
| `class_skeleton` | string (length) | 18 | 1.49M |
| `total_program_units` | int64 | 1 | 1.76k |
| `total_doc_str` | int64 | 0 | 771 |
| `AvgCountLine` | float64 | 0 | 7.89k |
| `AvgCountLineBlank` | float64 | 0 | 297 |
| `AvgCountLineCode` | float64 | 0 | 7.89k |
| `AvgCountLineComment` | float64 | 0 | 7.89k |
| `AvgCyclomatic` | float64 | 0 | 130 |
| `CommentToCodeRatio` | float64 | 0 | 168 |
| `CountClassBase` | float64 | 0 | 40 |
| `CountClassCoupled` | float64 | 0 | 583 |
| `CountClassCoupledModified` | float64 | 0 | 575 |
| `CountClassDerived` | float64 | 0 | 5.35k |
| `CountDeclInstanceMethod` | float64 | 0 | 529 |
| `CountDeclInstanceVariable` | float64 | 0 | 296 |
| `CountDeclMethod` | float64 | 0 | 599 |
| `CountDeclMethodAll` | float64 | 0 | 1.12k |
| `CountLine` | float64 | 1 | 40.4k |
| `CountLineBlank` | float64 | 0 | 8.16k |
| `CountLineCode` | float64 | 1 | 25.7k |
| `CountLineCodeDecl` | float64 | 1 | 8.15k |
| `CountLineCodeExe` | float64 | 0 | 24.2k |
| `CountLineComment` | float64 | 0 | 16.5k |
| `CountStmt` | float64 | 1 | 9.71k |
| `CountStmtDecl` | float64 | 1 | 8.15k |
| `CountStmtExe` | float64 | 0 | 9.69k |
| `MaxCyclomatic` | float64 | 0 | 759 |
| `MaxInheritanceTree` | float64 | 0 | 16 |
| `MaxNesting` | float64 | 0 | 34 |
| `SumCyclomatic` | float64 | 0 | 2.9k |
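A minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library. The Hub ID `org/class-metrics` is a placeholder, not the dataset's actual name; the ratio check at the end reflects the sample rows below, where `CommentToCodeRatio` is consistent with `CountLineComment / CountLineCode` (e.g. 1 / 8 = 0.125, reported as 0.13 for id 900):

```python
from datasets import load_dataset

# "org/class-metrics" is a hypothetical Hub ID; substitute the real repository.
ds = load_dataset("org/class-metrics", split="train")

row = ds[0]
print(row["class_name"])       # e.g. "transformers.models.bit.modeling_bit.BitDropPath"
print(row["class_skeleton"])   # signatures and docstrings with bodies stubbed out

# Recompute the comment-to-code ratio from the stored line counts.
ratio = row["CountLineComment"] / max(row["CountLineCode"], 1)
```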
Sample rows, with field labels restored from the schema above:

**id 900: `transformers.models.bit.modeling_bit.BitDropPath`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from torch import Tensor, nn
import torch
from typing import Optional

class BitDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f'p={self.drop_prob}'
```

`class_skeleton`:

```python
class BitDropPath(nn.Module):
    '''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass

    def extra_repr(self) -> str:
        pass
```

Metrics: total_program_units=4, total_doc_str=1, AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=1, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=12, CountLineBlank=3, CountLineCode=8, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=1, CountStmt=8, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
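The snippet above delegates to a module-level `drop_path` helper that was not captured in this row. A minimal sketch of the standard stochastic-depth function such a wrapper typically calls; this is an assumption about the surrounding file, not part of the record:

```python
import torch

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Identity when disabled or at inference time.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize to 0/1
    # Rescale by keep_prob so the expected activation is unchanged.
    return input.div(keep_prob) * random_tensor
```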
**id 901: `transformers.models.bit.modeling_bit.BitEmbeddings`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from .configuration_bit import BitConfig
from torch import Tensor, nn

class BitEmbeddings(nn.Module):
    """
    BiT Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: BitConfig):
        super().__init__()
        self.convolution = WeightStandardizedConv2d(config.num_channels, config.embedding_size, kernel_size=7, stride=2, eps=1e-08, padding=config.global_padding)
        self.pooler = BitMaxPool2d(kernel_size=3, stride=2, use_dynamic_padding=config.embedding_dynamic_padding)
        if config.global_padding is not None and config.global_padding.upper() == 'SAME':
            self.pad = nn.Identity()
        else:
            self.pad = nn.ConstantPad2d(padding=(1, 1, 1, 1), value=0.0)
        if config.layer_type != 'preactivation':
            self.norm = BitGroupNormActivation(config, num_channels=config.embedding_size)
        else:
            self.norm = nn.Identity()
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        embedding = self.convolution(pixel_values)
        embedding = self.pad(embedding)
        embedding = self.norm(embedding)
        embedding = self.pooler(embedding)
        return embedding
```

`class_skeleton`:

```python
class BitEmbeddings(nn.Module):
    '''
    BiT Embeddings (stem) composed of a single aggressive convolution.
    '''

    def __init__(self, config: BitConfig):
        pass

    def forward(self, pixel_values: Tensor) -> Tensor:
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=21, AvgCountLineBlank=5, AvgCountLineCode=16, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=48, CountLineBlank=12, CountLineCode=32, CountLineCodeDecl=10, CountLineCodeExe=29, CountLineComment=4, CountStmt=21, CountStmtDecl=10, CountStmtExe=18, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=5
**id 902: `transformers.models.bit.modeling_bit.BitEncoder`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
import numpy as np
from .configuration_bit import BitConfig
from torch import Tensor, nn
import torch

class BitEncoder(nn.Module):

    def __init__(self, config: BitConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        prev_chs = config.embedding_size
        current_stride = 4
        dilation = 1
        layer_dropouts = [x.tolist() for x in torch.Tensor(np.linspace(0, config.drop_path_rate, sum(config.depths))).split(config.depths)]
        for stage_idx, (current_depth, current_hidden_size, layer_dropout) in enumerate(zip(config.depths, config.hidden_sizes, layer_dropouts)):
            out_channels, stride, dilation = self._get_updated_hyperparameters(stage_idx, current_stride, current_hidden_size, dilation, config)
            stage = BitStage(config, prev_chs, out_channels, stride=stride, dilation=dilation, depth=current_depth, layer_dropout=layer_dropout)
            prev_chs = out_channels
            current_stride *= stride
            self.stages.add_module(str(stage_idx), stage)

    def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config):
        out_channels = make_div(current_hidden_size * config.width_factor)
        stride = 1 if stage_idx == 0 else 2
        if current_stride >= config.output_stride:
            dilation *= stride
            stride = 1
        return (out_channels, stride, dilation)

    def forward(self, hidden_state: Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple((v for v in [hidden_state, hidden_states] if v is not None))
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
```

`class_skeleton`:

```python
class BitEncoder(nn.Module):

    def __init__(self, config: BitConfig):
        pass

    def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config):
        pass

    def forward(self, hidden_state: Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> BaseModelOutputWithNoAttention:
        pass
```

Metrics: total_program_units=4, total_doc_str=0, AvgCountLine=22, AvgCountLineBlank=4, AvgCountLineCode=17, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.04, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=1, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=68, CountLineBlank=14, CountLineCode=52, CountLineCodeDecl=18, CountLineCodeExe=46, CountLineComment=2, CountStmt=32, CountStmtDecl=16, CountStmtExe=28, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=11
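`_get_updated_hyperparameters` above relies on a `make_div` helper defined outside the extracted class. A sketch of the usual channel-rounding utility (round to a multiple of a divisor without shrinking the value by more than 10%); the exact divisor default is an assumption:

```python
def make_div(value: float, divisor: int = 8) -> int:
    # Assumed helper; round to the nearest multiple of `divisor`.
    min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Avoid rounding down by more than 10%.
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value
```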
**id 903: `transformers.models.bit.modeling_bit.BitForImageClassification`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
import torch
from torch import Tensor, nn
from ...utils import auto_docstring, logging
from typing import Optional
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention

@auto_docstring(custom_intro='\n    BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ')
class BitForImageClassification(BitPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bit = BitModel(config)
        self.classifier = nn.Sequential(nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity())
        self.post_init()

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> ImageClassifierOutputWithNoAttention:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed
            (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
```

`class_skeleton`:

```python
@auto_docstring(custom_intro='\n    BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ')
class BitForImageClassification(BitPreTrainedModel):

    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> ImageClassifierOutputWithNoAttention:
        '''
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed
            (Cross-Entropy).
        '''
        pass
```

Metrics: total_program_units=5, total_doc_str=1, AvgCountLine=30, AvgCountLineBlank=4, AvgCountLineCode=23, AvgCountLineComment=4, AvgCyclomatic=8, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=68, CountLineBlank=8, CountLineCode=53, CountLineCodeDecl=19, CountLineCodeExe=37, CountLineComment=7, CountStmt=32, CountStmtDecl=12, CountStmtExe=29, MaxCyclomatic=13, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=15
**id 904: `transformers.models.bit.modeling_bit.BitGroupNormActivation`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from torch import Tensor, nn
from ...activations import ACT2FN

class BitGroupNormActivation(nn.GroupNorm):
    """
    A module that combines group normalization with an activation function.
    """

    def __init__(self, config, num_channels, eps=1e-05, affine=True, apply_activation=True):
        super().__init__(config.num_groups, num_channels, eps=eps, affine=affine)
        if apply_activation:
            self.activation = ACT2FN[config.hidden_act]
        else:
            self.activation = nn.Identity()

    def forward(self, hidden_state):
        hidden_state = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps)
        hidden_state = self.activation(hidden_state)
        return hidden_state
```

`class_skeleton`:

```python
class BitGroupNormActivation(nn.GroupNorm):
    '''
    A module that combines group normalization with an activation function.
    '''

    def __init__(self, config, num_channels, eps=1e-05, affine=True, apply_activation=True):
        pass

    def forward(self, hidden_state):
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.27, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=16, CountLineBlank=2, CountLineCode=11, CountLineCodeDecl=4, CountLineCodeExe=8, CountLineComment=3, CountStmt=10, CountStmtDecl=4, CountStmtExe=7, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
**id 905: `transformers.models.bit.modeling_bit.BitMaxPool2d`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
import collections
from torch import Tensor, nn

class BitMaxPool2d(nn.MaxPool2d):

    def __init__(self, kernel_size: int, stride=None, dilation=1, ceil_mode=False, padding=(0, 0), padding_value=0, use_dynamic_padding=True):
        kernel_size = kernel_size if isinstance(kernel_size, collections.abc.Iterable) else (kernel_size, kernel_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        dilation = dilation if isinstance(dilation, collections.abc.Iterable) else (dilation, dilation)
        super().__init__(kernel_size, stride, padding, dilation, ceil_mode)
        if use_dynamic_padding:
            self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value)
        else:
            self.pad = nn.Identity()

    def forward(self, hidden_states):
        hidden_states = self.pad(hidden_states)
        return nn.functional.max_pool2d(hidden_states, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode)
```

`class_skeleton`:

```python
class BitMaxPool2d(nn.MaxPool2d):

    def __init__(self, kernel_size: int, stride=None, dilation=1, ceil_mode=False, padding=(0, 0), padding_value=0, use_dynamic_padding=True):
        pass

    def forward(self, hidden_states):
        pass
```

Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=12, AvgCountLineBlank=0, AvgCountLineCode=12, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0.04, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=27, CountLineBlank=2, CountLineCode=24, CountLineCodeDecl=13, CountLineCodeExe=12, CountLineComment=1, CountStmt=12, CountStmtDecl=4, CountStmtExe=9, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
**id 906: `transformers.models.bit.modeling_bit.BitModel`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from typing import Optional
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...utils import auto_docstring, logging
from torch import Tensor, nn

@auto_docstring
class BitModel(BitPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = BitEmbeddings(config)
        self.encoder = BitEncoder(config)
        self.norm = BitGroupNormActivation(config, num_channels=config.hidden_sizes[-1]) if config.layer_type == 'preactivation' else nn.Identity()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.post_init()

    @auto_docstring
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.norm(last_hidden_state)
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
```

`class_skeleton`:

```python
@auto_docstring
class BitModel(BitPreTrainedModel):

    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention:
        pass
```

Metrics: total_program_units=5, total_doc_str=0, AvgCountLine=22, AvgCountLineBlank=5, AvgCountLineCode=17, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.02, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=54, CountLineBlank=11, CountLineCode=42, CountLineCodeDecl=15, CountLineCodeExe=29, CountLineComment=1, CountStmt=20, CountStmtDecl=12, CountStmtExe=17, MaxCyclomatic=4, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=6
**id 907: `transformers.models.bit.modeling_bit.BitPreActivationBottleneckLayer`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from torch import Tensor, nn

class BitPreActivationBottleneckLayer(nn.Module):
    """Pre-activation (v2) bottleneck block.

    Follows the implementation of "Identity Mappings in Deep Residual Networks":
    https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua

    Except it puts the stride on 3x3 conv when available.
    """

    def __init__(self, config, in_channels, out_channels=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, drop_path_rate=0.0, is_first_layer=False):
        super().__init__()
        first_dilation = first_dilation or dilation
        out_channels = out_channels or in_channels
        mid_channels = make_div(out_channels * bottle_ratio)
        if is_first_layer:
            self.downsample = BitDownsampleConv(config, in_channels, out_channels, stride=stride, preact=True)
        else:
            self.downsample = None
        self.norm1 = BitGroupNormActivation(config, in_channels)
        self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-08, padding=config.global_padding)
        self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
        self.conv2 = WeightStandardizedConv2d(mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-08, padding=config.global_padding)
        self.norm3 = BitGroupNormActivation(config, mid_channels)
        self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-08, padding=config.global_padding)
        self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()

    def forward(self, hidden_states):
        hidden_states_preact = self.norm1(hidden_states)
        shortcut = hidden_states
        if self.downsample is not None:
            shortcut = self.downsample(hidden_states_preact)
        hidden_states = self.conv1(hidden_states_preact)
        hidden_states = self.conv2(self.norm2(hidden_states))
        hidden_states = self.conv3(self.norm3(hidden_states))
        hidden_states = self.drop_path(hidden_states)
        return hidden_states + shortcut
```

`class_skeleton`:

```python
class BitPreActivationBottleneckLayer(nn.Module):
    '''Pre-activation (v2) bottleneck block.

    Follows the implementation of "Identity Mappings in Deep Residual Networks":
    https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua

    Except it puts the stride on 3x3 conv when available.
    '''

    def __init__(self, config, in_channels, out_channels=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, drop_path_rate=0.0, is_first_layer=False):
        pass

    def forward(self, hidden_states):
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=29, AvgCountLineBlank=5, AvgCountLineCode=23, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.15, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=8, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=66, CountLineBlank=12, CountLineCode=47, CountLineCodeDecl=26, CountLineCodeExe=32, CountLineComment=7, CountStmt=26, CountStmtDecl=14, CountStmtExe=23, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=5
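The block above instantiates a `BitDownsampleConv` shortcut that sits outside this row. A sketch of what such a projection shortcut plausibly looks like in this file's style (a 1x1 weight-standardized convolution, with normalization skipped in the pre-activation case); it reuses the classes from ids 904 and 911, and the details are an assumption:

```python
from torch import nn

class BitDownsampleConv(nn.Module):
    # Assumed shape of the shortcut module; not taken from the row itself.
    def __init__(self, config, in_channels, out_channels, stride=1, preact=True):
        super().__init__()
        # 1x1 projection to match the channels/stride of the residual branch.
        self.conv = WeightStandardizedConv2d(in_channels, out_channels, 1, stride=stride, eps=1e-08, padding=config.global_padding)
        # Pre-activation blocks normalize before the block, so no norm here.
        self.norm = nn.Identity() if preact else BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)

    def forward(self, x):
        return self.norm(self.conv(x))
```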
**id 908: `transformers.models.bit.modeling_bit.BitPreTrainedModel`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from torch import Tensor, nn
from ...modeling_utils import PreTrainedModel
from .configuration_bit import BitConfig
from ...utils import auto_docstring, logging
import math

@auto_docstring
class BitPreTrainedModel(PreTrainedModel):
    config: BitConfig
    base_model_prefix = 'bit'
    main_input_name = 'pixel_values'
    _no_split_modules = ['BitEmbeddings']

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, nn.Linear):
            nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
            if module.bias is not None:
                fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
                bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                nn.init.uniform_(module.bias, -bound, bound)
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
```

`class_skeleton`:

```python
@auto_docstring
class BitPreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        pass
```

Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=13, AvgCountLineBlank=0, AvgCountLineCode=12, AvgCountLineComment=1, AvgCyclomatic=6, CommentToCodeRatio=0.29, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=3, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=24, CountLineBlank=2, CountLineCode=17, CountLineCodeDecl=8, CountLineCodeExe=15, CountLineComment=5, CountStmt=15, CountStmtDecl=8, CountStmtExe=13, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=6
**id 909: `transformers.models.bit.modeling_bit.BitStage`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from torch import Tensor, nn

class BitStage(nn.Module):
    """
    A ResNet v2 stage composed by stacked layers.
    """

    def __init__(self, config, in_channels, out_channels, stride, dilation, depth, bottle_ratio=0.25, layer_dropout=None):
        super().__init__()
        first_dilation = 1 if dilation in (1, 2) else 2
        if config.layer_type == 'bottleneck':
            layer_cls = BitBottleneckLayer
        else:
            layer_cls = BitPreActivationBottleneckLayer
        prev_chs = in_channels
        self.layers = nn.Sequential()
        for layer_idx in range(depth):
            stride, drop_path_rate, is_first_layer = self._get_updated_hyperparameters(layer_idx, stride, layer_dropout)
            self.layers.add_module(str(layer_idx), layer_cls(config, prev_chs, out_channels, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, first_dilation=first_dilation, drop_path_rate=drop_path_rate, is_first_layer=is_first_layer))
            prev_chs = out_channels
            first_dilation = dilation

    def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout):
        """
        Get the new hyper-parameters with respect to the previous ones and the index of the current layer.
        """
        if layer_dropout:
            drop_path_rate = layer_dropout[layer_idx]
        else:
            drop_path_rate = 0.0
        if layer_idx != 0:
            stride = 1
        is_first_layer = layer_idx == 0
        return (stride, drop_path_rate, is_first_layer)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for _, layer in enumerate(self.layers):
            hidden_state = layer(hidden_state)
        return hidden_state
```

`class_skeleton`:

```python
class BitStage(nn.Module):
    '''
    A ResNet v2 stage composed by stacked layers.
    '''

    def __init__(self, config, in_channels, out_channels, stride, dilation, depth, bottle_ratio=0.25, layer_dropout=None):
        pass

    def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout):
        '''
        Get the new hyper-parameters with respect to the previous ones and the index of the current layer.
        '''
        pass

    def forward(self, input: Tensor) -> Tensor:
        pass
```

Metrics: total_program_units=4, total_doc_str=2, AvgCountLine=22, AvgCountLineBlank=2, AvgCountLineCode=18, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.15, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=1, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=72, CountLineBlank=10, CountLineCode=54, CountLineCodeDecl=24, CountLineCodeExe=40, CountLineComment=8, CountStmt=27, CountStmtDecl=14, CountStmtExe=23, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=9
**id 910: `transformers.models.bit.modeling_bit.DynamicPad2d`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from torch import Tensor, nn
import math

class DynamicPad2d(nn.Module):
    """
    A module that wraps dynamic padding of any input, given the parameters of the convolutional layer and the input
    hidden states.
    """

    def __init__(self, kernel_size, stride, dilation, value=0):
        super().__init__()
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if isinstance(stride, int):
            stride = (stride, stride)
        if isinstance(dilation, int):
            dilation = (dilation, dilation)
        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.value = value

        def compute_padding(x, kernel_size, stride, dilation):
            return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)

        self.compute_padding = compute_padding

    def forward(self, input):
        input_height, input_width = input.size()[-2:]
        padding_height = self.compute_padding(input_height, self.kernel_size[0], self.stride[0], self.dilation[0])
        padding_width = self.compute_padding(input_width, self.kernel_size[1], self.stride[1], self.dilation[1])
        if padding_height > 0 or padding_width > 0:
            input = nn.functional.pad(input, [padding_width // 2, padding_width - padding_width // 2, padding_height // 2, padding_height - padding_height // 2], value=self.value)
        return input
```

`class_skeleton`:

```python
class DynamicPad2d(nn.Module):
    '''
    A module that wraps dynamic padding of any input, given the parameters of the convolutional layer and the input
    hidden states.
    '''

    def __init__(self, kernel_size, stride, dilation, value=0):
        pass

    def compute_padding(x, kernel_size, stride, dilation):
        pass

    def forward(self, input):
        pass
```

Metrics: total_program_units=4, total_doc_str=1, AvgCountLine=15, AvgCountLineBlank=2, AvgCountLineCode=11, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.25, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=49, CountLineBlank=9, CountLineCode=32, CountLineCodeDecl=12, CountLineCodeExe=28, CountLineComment=8, CountStmt=23, CountStmtDecl=12, CountStmtExe=19, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=7
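A quick worked example of the padding formula above: for input width 224, kernel 3, stride 2, dilation 1, it gives max((ceil(224/2) - 1) * 2 + (3 - 1) * 1 + 1 - 224, 0) = max(225 - 224, 0) = 1, i.e. a single extra column restores the "same" output size of 112. The standalone check below uses the formula exactly as it appears in the row:

```python
import math

def compute_padding(x, kernel_size, stride, dilation):
    # Total padding needed so the strided conv produces ceil(x / stride) outputs.
    return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)

assert compute_padding(224, kernel_size=3, stride=2, dilation=1) == 1
assert compute_padding(7, kernel_size=3, stride=2, dilation=1) == 2  # odd sizes pad asymmetrically
```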
**id 911: `transformers.models.bit.modeling_bit.WeightStandardizedConv2d`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/bit/modeling_bit.py`

`human_written_code`:

```python
from torch import Tensor, nn

class WeightStandardizedConv2d(nn.Conv2d):
    """Conv2d with Weight Standardization. Used for ViT Hybrid model.

    Paper: [Micro-Batch Training with Batch-Channel Normalization and Weight
    Standardization](https://huggingface.co/papers/1903.10520v2)
    """

    def __init__(self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=False, eps=1e-06):
        padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
        super().__init__(in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        if is_dynamic:
            self.pad = DynamicPad2d(kernel_size, stride, dilation)
        else:
            self.pad = None
        self.eps = eps

    def forward(self, hidden_state):
        if self.pad is not None:
            hidden_state = self.pad(hidden_state)
        weight = nn.functional.batch_norm(self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps).reshape_as(self.weight)
        hidden_state = nn.functional.conv2d(hidden_state, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
        return hidden_state
```

`class_skeleton`:

```python
class WeightStandardizedConv2d(nn.Conv2d):
    '''Conv2d with Weight Standardization. Used for ViT Hybrid model.

    Paper: [Micro-Batch Training with Batch-Channel Normalization and Weight
    Standardization](https://huggingface.co/papers/1903.10520v2)
    '''

    def __init__(self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=False, eps=1e-06):
        pass

    def forward(self, hidden_state):
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=19, AvgCountLineBlank=0, AvgCountLineCode=19, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=46, CountLineBlank=3, CountLineCode=39, CountLineCodeDecl=19, CountLineCodeExe=25, CountLineComment=4, CountStmt=14, CountStmtDecl=7, CountStmtExe=11, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
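`WeightStandardizedConv2d.__init__` calls a `get_padding_value` helper that was not captured in the row. A plausible sketch based on the TF-style 'SAME' convention the class uses: resolve to static symmetric padding when that is possible in one shot, otherwise fall back to dynamic padding at forward time. The branch details are an assumption, flagged in comments:

```python
def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1):
    # Assumed helper; returns (padding, is_dynamic).
    dynamic = False
    if isinstance(padding, str):
        padding = padding.lower()
        if padding == 'same':
            if stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0:
                # 'SAME' achievable with a fixed symmetric pad.
                padding = (stride - 1 + dilation * (kernel_size - 1)) // 2
            else:
                # Input-dependent: defer to DynamicPad2d in forward().
                padding = 0
                dynamic = True
        elif padding == 'valid':
            padding = 0
        else:
            padding = (stride - 1 + dilation * (kernel_size - 1)) // 2
    return padding, dynamic
```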
**id 912: `transformers.models.blenderbot.configuration_blenderbot.BlenderbotConfig`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/configuration_blenderbot.py`

`human_written_code`:

````python
from ...configuration_utils import PretrainedConfig

class BlenderbotConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate an
    Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Blenderbot
    [facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`BlenderbotModel`] or [`TFBlenderbotModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 128):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see
            https://huggingface.co/papers/1909.11556) for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see
            https://huggingface.co/papers/1909.11556) for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models)
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import BlenderbotConfig, BlenderbotModel

    >>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration
    >>> configuration = BlenderbotConfig()

    >>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration
    >>> model = BlenderbotModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'blenderbot'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=8008, max_position_embeddings=128, encoder_layers=2, encoder_ffn_dim=10240, encoder_attention_heads=32, decoder_layers=24, decoder_ffn_dim=10240, decoder_attention_heads=32, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=2560, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, encoder_no_repeat_ngram_size=3, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, forced_eos_token_id=forced_eos_token_id, **kwargs)
````

`class_skeleton`:

````python
class BlenderbotConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate an
    Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Blenderbot
    [facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`BlenderbotModel`] or [`TFBlenderbotModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 128):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see
            https://huggingface.co/papers/1909.11556) for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see
            https://huggingface.co/papers/1909.11556) for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models)
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import BlenderbotConfig, BlenderbotModel

    >>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration
    >>> configuration = BlenderbotConfig()

    >>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration
    >>> model = BlenderbotModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=8008, max_position_embeddings=128, encoder_layers=2, encoder_ffn_dim=10240, encoder_attention_heads=32, decoder_layers=24, decoder_ffn_dim=10240, decoder_attention_heads=32, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=2560, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, encoder_no_repeat_ngram_size=3, forced_eos_token_id=2, **kwargs):
        pass
````

Metrics: total_program_units=2, total_doc_str=1, AvgCountLine=59, AvgCountLineBlank=1, AvgCountLineCode=58, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=1.02, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=19, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=135, CountLineBlank=11, CountLineCode=62, CountLineCodeDecl=52, CountLineCodeExe=32, CountLineComment=63, CountStmt=25, CountStmtDecl=24, CountStmtExe=23, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1
**id 913: `transformers.models.blenderbot.configuration_blenderbot.BlenderbotOnnxConfig`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/configuration_blenderbot.py`

`human_written_code`:

```python
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from collections.abc import Mapping
from ... import PreTrainedTokenizer
from collections import OrderedDict
from typing import Any
from ...file_utils import is_torch_available

class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ['default', 'seq2seq-lm']:
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == 'causal-lm':
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                _, num_decoder_layers = self.num_layers
                for i in range(num_decoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})])
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ['default', 'seq2seq-lm']:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair)
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair)
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads)
            decoder_past_length = decoder_seq_length
            decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads)
            common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            _, num_decoder_layers = self.num_layers
            for _ in range(num_decoder_layers):
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            past_key_values_length = seqlen
            _, num_decoder_layers = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads)
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers)]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors='pt'))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        if self.task in ['default', 'seq2seq-lm']:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
        elif self.task == 'causal-lm':
            common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ['default', 'seq2seq-lm']:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
        if direction not in ['inputs', 'outputs']:
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
        name = 'past_key_values' if direction == 'inputs' else 'present'
        _, num_decoder_layers = self.num_layers
        encoder_sequence = 'past_encoder_sequence'
        decoder_sequence = 'past_decoder_sequence' if direction == 'inputs' else 'past_decoder_sequence + sequence'
        for i in range(num_decoder_layers):
            inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'batch', 2: encoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'batch', 2: encoder_sequence}
```

`class_skeleton`:

```python
class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        pass

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        pass

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        pass

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        pass

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        pass

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
        pass

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        pass

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
        pass
```

Metrics: total_program_units=11, total_doc_str=0, AvgCountLine=26, AvgCountLineBlank=1, AvgCountLineCode=24, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=8, CountDeclInstanceVariable=1, CountDeclMethod=8, CountDeclMethodAll=8, CountLine=225, CountLineBlank=18, CountLineCode=197, CountLineCodeDecl=74, CountLineCodeExe=156, CountLineComment=10, CountStmt=95, CountStmtDecl=43, CountStmtExe=84, MaxCyclomatic=7, MaxInheritanceTree=1, MaxNesting=3, SumCyclomatic=30
**id 914: `transformers.models.blenderbot.modeling_blenderbot.BlenderbotAttention`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py`

`human_written_code`:

```python
from torch import nn
from ...utils.deprecation import deprecate_kwarg
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from .configuration_blenderbot import BlenderbotConfig
from typing import Callable, Optional, Union
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch

class BlenderbotAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BlenderbotConfig]=None, layer_idx: Optional[int]=None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
        self.scaling = self.head_dim ** (-0.5)
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.layer_idx = layer_idx
        if layer_idx is None and self.is_decoder:
            logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        is_cross_attention = key_value_states is not None
        bsz, tgt_len = hidden_states.shape[:-1]
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
        q_input_shape = (bsz, tgt_len, -1, self.head_dim)
        kv_input_shape = (bsz, src_len, -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
        is_updated = False
        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    curr_past_key_value = past_key_values.cross_attention_cache
                else:
                    curr_past_key_value = past_key_values.self_attention_cache
            else:
                curr_past_key_value = past_key_values
        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            key_states = curr_past_key_value.layers[self.layer_idx].keys
            value_states = curr_past_key_value.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(current_states)
            value_states = self.v_proj(current_states)
            key_states = key_states.view(*kv_input_shape).transpose(1, 2)
            value_states = value_states.view(*kv_input_shape).transpose(1, 2)
            if past_key_values is not None:
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
                if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
                    past_key_values.is_updated[self.layer_idx] = True
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != 'eager':
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs)
        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights)
```

`class_skeleton`:

```python
class BlenderbotAttention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BlenderbotConfig]=None, layer_idx: Optional[int]=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        '''Input shape: Batch x Time x Channel'''
        pass
```

Metrics: total_program_units=4, total_doc_str=2, AvgCountLine=50, AvgCountLineBlank=7, AvgCountLineCode=35, AvgCountLineComment=8, AvgCyclomatic=5, CommentToCodeRatio=0.24, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=12, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=156, CountLineBlank=23, CountLineCode=107, CountLineCodeDecl=44, CountLineCodeExe=86, CountLineComment=26, CountStmt=68, CountStmtDecl=27, CountStmtExe=64, MaxCyclomatic=12, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=15
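The forward pass above dispatches to `eager_attention_forward` when no optimized attention kernel is configured; the helper itself is outside the extracted class. A sketch of the standard eager path it names (scaled dot-product, additive mask, softmax, dropout, optional head mask); the exact signature is an assumption:

```python
import torch
from torch import nn

def eager_attention_forward(module, query, key, value, attention_mask, scaling, dropout=0.0, head_mask=None, **kwargs):
    # Assumed helper; shapes are (batch, heads, seq, head_dim).
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Additive mask, cropped to the key length.
        attn_weights = attn_weights + attention_mask[:, :, :, : key.shape[-2]]
    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    if head_mask is not None:
        attn_weights = attn_weights * head_mask.view(1, -1, 1, 1)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value)
    # Back to (batch, seq, heads, head_dim) for the caller's reshape.
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights
```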
**id 915: `transformers.models.blenderbot.modeling_blenderbot.BlenderbotDecoder`**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py`

`human_written_code`:

```python
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
import torch
from torch import nn
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
import math
from .configuration_blenderbot import BlenderbotConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache

class BlenderbotDecoder(BlenderbotPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotDecoderLayer`]

    Args:
        config: BlenderbotConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding]=None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = BlenderbotScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale)
        self.embed_positions = BlenderbotLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model)
        self.layers = nn.ModuleList([BlenderbotDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position: Optional[torch.Tensor]=None):
        """
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache
                guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key
                and values in the self-attention blocks and in the cross-attention blocks) that can be used (see
                `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can
                optionally input only the last `decoder_input_ids` (those that don't have their past key value states
                given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
                `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')
        elif input_ids is not None:
            input = input_ids
            input_shape = input.shape
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            input = inputs_embeds[:, :, -1]
        else:
            raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input)
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`...')
                use_cache = False
        if use_cache and past_key_values is None:
            past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None else DynamicCache(config=self.config)
        if use_cache and isinstance(past_key_values, tuple):
            logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
            past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
        batch_size, seq_length = inputs_embeds.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)
        if attention_mask is None and (not is_torchdynamo_compiling()):
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
        self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values
        causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache)
        encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)
        position_ids = self.embed_positions((batch_size, seq_length), past_key_values_length, position_ids=cache_position)
        hidden_states = inputs_embeds + position_ids
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue
            layer_outputs = decoder_layer(hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
```

`class_skeleton`:

```python
class BlenderbotDecoder(BlenderbotPreTrainedModel):
    '''
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotDecoderLayer`]

    Args:
        config: BlenderbotConfig
        embed_tokens (nn.Embedding): output embedding
    '''

    def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding]=None):
        pass

    def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position: Optional[torch.Tensor]=None):
        '''
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache
                guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key
                and values in the self-attention blocks and in the cross-attention blocks) that can be used (see
                `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can
                optionally input only the last `decoder_input_ids` (those that don't have their past key value states
                given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
                `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        '''
        pass
```
3
2
61
8
36
16
11
0.49
1
11
5
0
4
9
4
6
254
38
145
43
126
71
77
29
72
37
2
3
42
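The stored `CommentToCodeRatio` of 0.49 in the record above can be re-derived from its own `CountLineComment` (71) and `CountLineCode` (145) fields, following the column order given in the dataset header. A minimal check (values copied from the record; not part of the dataset itself):

```python
# Values copied from the BlenderbotDecoder record above.
count_line_comment = 71   # CountLineComment
count_line_code = 145     # CountLineCode

# The dataset stores CommentToCodeRatio rounded to two decimals.
assert round(count_line_comment / count_line_code, 2) == 0.49
```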
916
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotDecoderLayer
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
from typing import Callable, Optional, Union
from .configuration_blenderbot import BlenderbotConfig
from ...activations import ACT2FN
from ...utils.deprecation import deprecate_kwarg
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache

class BlenderbotDecoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: BlenderbotConfig, layer_idx: Optional[int]=None):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = BlenderbotAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, layer_idx=layer_idx)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = BlenderbotAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx)
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape
                `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)`
                where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size
                `(decoder_attention_heads,)`.
            past_key_values (`Cache`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions)
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        return outputs
class BlenderbotDecoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: BlenderbotConfig, layer_idx: Optional[int]=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape
                `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)`
                where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size
                `(decoder_attention_heads,)`.
            past_key_values (`Cache`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        '''
        pass
4
1
58
6
40
13
4
0.31
1
4
1
0
2
11
2
12
118
12
81
32
67
25
44
21
41
6
1
1
7
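The `BlenderbotDecoderLayer` above applies the same pre-LayerNorm residual pattern three times (self-attention, cross-attention, feed-forward): normalize, run the sublayer, apply dropout, then add the residual. A minimal self-contained sketch of that pattern for the feed-forward block only (a simplified stand-in, not the Blenderbot module itself):

```python
import torch
from torch import nn

class PreNormFeedForward(nn.Module):
    """Pre-LayerNorm residual block: norm -> sublayer -> dropout -> residual add."""

    def __init__(self, d_model: int = 16, ffn_dim: int = 32, dropout: float = 0.1):
        super().__init__()
        self.final_layer_norm = nn.LayerNorm(d_model)
        self.fc1 = nn.Linear(d_model, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)  # normalize *before* the sublayer
        hidden_states = self.fc2(torch.relu(self.fc1(hidden_states)))
        return residual + self.dropout(hidden_states)

out = PreNormFeedForward()(torch.randn(2, 5, 16))  # (batch, seq_len, d_model)
```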
917
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotDecoderWrapper
class BlenderbotDecoderWrapper(BlenderbotPreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    """

    def __init__(self, config):
        super().__init__(config)
        self.decoder = BlenderbotDecoder(config)

    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)
class BlenderbotDecoderWrapper(BlenderbotPreTrainedModel):
    '''
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    '''

    def __init__(self, config):
        pass

    def forward(self, *args, **kwargs):
        pass
3
1
3
0
3
0
1
0.67
1
2
1
0
2
1
2
4
12
2
6
4
3
4
6
4
3
1
2
0
2
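`BlenderbotDecoderWrapper` exists only so that decoder weights load under a `decoder.`-prefixed key namespace. A generic sketch of that delegation pattern (hypothetical class name, not Blenderbot-specific):

```python
from torch import nn

class DecoderWrapper(nn.Module):  # hypothetical name, for illustration only
    def __init__(self, decoder: nn.Module):
        super().__init__()
        self.decoder = decoder

    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)  # pure delegation

wrapper = DecoderWrapper(nn.Linear(4, 4))
print(list(wrapper.state_dict()))  # ['decoder.weight', 'decoder.bias']
```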
918
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotEncoder
import math import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from torch import nn from .configuration_blenderbot import BlenderbotConfig from typing import Callable, Optional, Union class BlenderbotEncoder(BlenderbotPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`BlenderbotEncoderLayer`]. Args: config: BlenderbotConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = BlenderbotScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale) self.embed_positions = BlenderbotLearnedPositionalEmbedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None): """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) attention_mask = self._update_full_mask(attention_mask, inputs_embeds) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class BlenderbotEncoder(BlenderbotPreTrainedModel): ''' Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`BlenderbotEncoderLayer`]. Args: config: BlenderbotConfig embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding]=None): pass def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None): ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
3
2
80
12
50
19
14
0.45
1
10
5
0
2
9
2
4
171
27
100
32
88
45
62
23
59
24
2
3
27
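`BlenderbotEncoder.forward` implements LayerDrop: during training, each encoder layer is skipped with probability `config.encoder_layerdrop`. A minimal sketch of that rule in isolation:

```python
import torch

layerdrop = 0.1  # corresponds to config.encoder_layerdrop
kept = 0
for _ in range(1000):
    if torch.rand([]) < layerdrop:  # in the model, only checked when self.training is True
        continue  # this layer's computation is skipped entirely
    kept += 1
print(f"kept roughly {kept / 10:.0f}% of layer applications")
```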
919
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotEncoderLayer
from .configuration_blenderbot import BlenderbotConfig
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
from ...activations import ACT2FN
import torch

class BlenderbotEncoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: BlenderbotConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = BlenderbotAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config)
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        if hidden_states.dtype == torch.float16:
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        return (hidden_states, attn_weights)
class BlenderbotEncoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: BlenderbotConfig):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        '''
        pass
3
1
33
3
25
6
2
0.22
1
4
1
0
2
9
2
12
68
7
50
22
41
11
32
16
29
3
1
1
4
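The tail of `BlenderbotEncoderLayer.forward` guards against float16 overflow by clamping activations to just inside the representable range. A minimal sketch showing the clamp recovering from an `inf` produced by fp16 overflow:

```python
import torch

hidden_states = torch.tensor([70000.0, -70000.0]).to(torch.float16)  # overflows to +/-inf
clamp_value = torch.finfo(torch.float16).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
assert torch.isfinite(hidden_states).all()  # inf values pulled back into range
```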
920
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotForCausalLM
from torch.nn import CrossEntropyLoss from typing import Callable, Optional, Union from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput import torch from ...generation import GenerationMixin from torch import nn class BlenderbotForCausalLM(BlenderbotPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = BlenderbotDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: """ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, BlenderbotForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> model = BlenderbotForCausalLM.from_pretrained("facebook/blenderbot-400M-distill", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) logits = self.lm_head(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
class BlenderbotForCausalLM(BlenderbotPreTrainedModel, GenerationMixin): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_decoder(self, decoder): pass def get_decoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: ''' cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, BlenderbotForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> model = BlenderbotForCausalLM.from_pretrained("facebook/blenderbot-400M-distill", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```''' pass
8
1
19
3
9
8
2
0.84
2
6
2
0
8
2
9
11
186
33
83
37
56
70
42
20
32
7
2
1
16
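`BlenderbotForCausalLM.forward` computes the loss by flattening the `(batch, seq_len, vocab)` logits and `(batch, seq_len)` labels; positions labelled `-100` are ignored, which is the `CrossEntropyLoss` default `ignore_index`. A minimal sketch:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
logits = torch.randn(2, 5, vocab_size)         # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (2, 5))  # (batch, seq_len)
labels[:, 0] = -100                            # ignored by CrossEntropyLoss

loss = CrossEntropyLoss()(logits.view(-1, vocab_size), labels.view(-1))
```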
921
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotForConditionalGeneration
import warnings import torch from torch.nn import CrossEntropyLoss from torch import nn import os from ..blenderbot_small import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from .configuration_blenderbot import BlenderbotConfig from typing import Callable, Optional, Union @auto_docstring(custom_intro='\n The Blenderbot Model with a language modeling head. Can be used for summarization.\n ') class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel, GenerationMixin): base_model_prefix = 'model' _keys_to_ignore_on_load_missing = ['final_logits_bias'] _tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: BlenderbotConfig): super().__init__(config) self.model = BlenderbotModel(config) self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): if pretrained_model_name_or_path == 'facebook/blenderbot-90M': warnings.warn("The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical checkpoint `facebook/small_blenderbot-90M` with `BlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')` instead.", FutureWarning) return BlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple, BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: 
Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example conversation: ```python >>> from transformers import AutoTokenizer, BlenderbotForConditionalGeneration >>> mname = "facebook/blenderbot-400M-distill" >>> model = BlenderbotForConditionalGeneration.from_pretrained(mname) >>> tokenizer = AutoTokenizer.from_pretrained(mname) >>> UTTERANCE = "My friends are cool but they eat too many carbs." >>> print("Human: ", UTTERANCE) Human: My friends are cool but they eat too many carbs. >>> inputs = tokenizer([UTTERANCE], return_tensors="pt") >>> reply_ids = model.generate(**inputs) >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) Bot: That's unfortunate. Are they trying to lose weight or are they just trying to be healthier? >>> REPLY = "I'm not sure" >>> print("Human: ", REPLY) Human: I'm not sure >>> NEXT_UTTERANCE = ( ... "My friends are cool but they eat too many carbs.</s> <s>That's unfortunate. " ... "Are they trying to lose weight or are they just trying to be healthier?</s> " ... "<s> I'm not sure." ... ) >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt") >>> next_reply_ids = model.generate(**inputs) >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0]) Bot: I see. Well, it's good that they're trying to change their eating habits. 
``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.') use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
@auto_docstring(custom_intro='\n The Blenderbot Model with a language modeling head. Can be used for summarization.\n ') class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel, GenerationMixin): def __init__(self, config: BlenderbotConfig): pass @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): pass def get_encoder(self): pass def get_decoder(self): pass def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: pass def _resize_final_logits_bias(self, new_num_tokens: int) -> None: pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple, BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example conversation: ```python >>> from transformers import AutoTokenizer, BlenderbotForConditionalGeneration >>> mname = "facebook/blenderbot-400M-distill" >>> model = BlenderbotForConditionalGeneration.from_pretrained(mname) >>> tokenizer = AutoTokenizer.from_pretrained(mname) >>> UTTERANCE = "My friends are cool but they eat too many carbs." >>> print("Human: ", UTTERANCE) Human: My friends are cool but they eat too many carbs. 
>>> inputs = tokenizer([UTTERANCE], return_tensors="pt") >>> reply_ids = model.generate(**inputs) >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) Bot: That's unfortunate. Are they trying to lose weight or are they just trying to be healthier? >>> REPLY = "I'm not sure" >>> print("Human: ", REPLY) Human: I'm not sure >>> NEXT_UTTERANCE = ( ... "My friends are cool but they eat too many carbs.</s> <s>That's unfortunate. " ... "Are they trying to lose weight or are they just trying to be healthier?</s> " ... "<s> I'm not sure." ... ) >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt") >>> next_reply_ids = model.generate(**inputs) >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0]) Bot: I see. Well, it's good that they're trying to change their eating habits. ``` ''' pass
11
1
13
1
11
1
2
0.07
2
13
5
0
8
3
10
12
148
18
121
51
85
9
57
27
46
8
2
2
20
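When `labels` are given without `decoder_input_ids`, the forward above calls `shift_tokens_right` to build the decoder inputs. The helper itself is defined elsewhere in the file; a sketch assuming the usual BART-family behavior (prepend the decoder start token, drop the last label, replace `-100` with padding):

```python
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    # Prepend the decoder start token and drop the last label token.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # Replace -100 (ignored label positions) with the pad token.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[5, 6, 7, -100]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1))  # tensor([[1, 5, 6, 7]])
```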
922
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotLearnedPositionalEmbedding
import torch
from typing import Callable, Optional, Union
from torch import nn

class BlenderbotLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        super().__init__(num_embeddings, embedding_dim)

    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        if position_ids is None:
            bsz, seq_len = input_ids_shape[:2]
            position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device)
        return super().forward(position_ids)
class BlenderbotLearnedPositionalEmbedding(nn.Embedding):
    '''
    This module learns positional embeddings up to a fixed maximum size.
    '''

    def __init__(self, num_embeddings: int, embedding_dim: int):
        pass

    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None):
        '''`input_ids_shape` is expected to be [bsz x seqlen].'''
        pass
3
2
5
0
4
1
1
0.44
1
2
0
0
2
0
2
2
15
2
9
5
6
4
7
5
4
1
1
0
2
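Unlike sinusoidal embeddings, `BlenderbotLearnedPositionalEmbedding` is a plain `nn.Embedding` indexed by absolute positions offset by the cached sequence length. A minimal sketch of that indexing:

```python
import torch
from torch import nn

embed_positions = nn.Embedding(128, 8)  # (max_position_embeddings, d_model)

past_key_values_length, seq_len = 4, 3  # 4 tokens already cached, decoding 3 more
position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len)
pos_embeds = embed_positions(position_ids)  # rows 4..6 of the embedding table
```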
923
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotModel
from ..blenderbot_small import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel from typing import Callable, Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput import warnings import torch import os from .configuration_blenderbot import BlenderbotConfig import math @auto_docstring class BlenderbotModel(BlenderbotPreTrainedModel): _tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight'] def __init__(self, config: BlenderbotConfig): super().__init__(config) padding_idx, vocab_size = (config.pad_token_id, config.vocab_size) embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = BlenderbotScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = BlenderbotEncoder(config, self.shared) self.decoder = BlenderbotDecoder(config, self.shared) self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): if pretrained_model_name_or_path == 'facebook/blenderbot-90M': warnings.warn("The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical checkpoint `facebook/small_blenderbot-90M` with `BlenderbotSmallModel.from_pretrained('facebook/small_blenderbot-90M')` instead.", FutureWarning) return BlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple, BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). 
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, BlenderbotModel >>> model = BlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 6, 1280] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring class BlenderbotModel(BlenderbotPreTrainedModel): def __init__(self, config: BlenderbotConfig): pass @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_encoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple, BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, BlenderbotModel >>> model = BlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 6, 1280] ```''' pass
10
1
18
2
13
2
3
0.17
1
13
7
0
6
3
7
9
135
19
99
34
71
17
37
15
29
10
2
1
18
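`BlenderbotModel.__init__` wraps the shared token embedding with a scale factor of `sqrt(d_model)` when `config.scale_embedding` is set. A minimal sketch of the scaling, using a plain `nn.Embedding` rather than the `BlenderbotScaledWordEmbedding` class:

```python
import math
import torch
from torch import nn

d_model, vocab_size = 16, 100
embed_scale = math.sqrt(d_model)  # would be 1.0 when config.scale_embedding is False

embed_tokens = nn.Embedding(vocab_size, d_model, padding_idx=0)
inputs_embeds = embed_tokens(torch.tensor([[5, 7, 9]])) * embed_scale
```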
924
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotPreTrainedModel
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa from .configuration_blenderbot import BlenderbotConfig from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from torch import nn import torch from typing import Callable, Optional, Union @auto_docstring class BlenderbotPreTrainedModel(PreTrainedModel): config: BlenderbotConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids, 'decoder_input_ids': input_ids} return dummy_inputs def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): if attention_mask is not None: if 'flash' in self.config._attn_implementation: attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == 'sdpa': attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache): if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) elif attention_mask is None: attention_mask = make_flex_block_causal_mask(torch.ones(size=(input_tensor.shape[0], input_tensor.shape[1]), device=input_tensor.device)) return attention_mask if 'flash' in self.config._attn_implementation: if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, does nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`int`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): if encoder_hidden_states is not None and encoder_attention_mask is not None: if 'flash' in self.config._attn_implementation: encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.config._attn_implementation == 'sdpa': encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) elif self.config._attn_implementation == 'flex_attention': if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False) else: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) return encoder_attention_mask
@auto_docstring class BlenderbotPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass @property def dummy_inputs(self): pass def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor): pass def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, does nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`int`): Batch size. ''' pass def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor): pass
10
1
10
0
10
0
3
0
1
0
0
6
2
0
2
2
26
2
24
11
20
0
18
10
15
5
1
2
6
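The `_prepare_4d_causal_attention_mask_with_cache_position` method documented in the record above is hard to follow in flattened form. Below is a minimal, self-contained sketch of the same mask construction in plain `torch`; the function name `build_causal_mask` and the toy shapes are illustrative choices of this sketch, not part of the library.

```python
import torch

def build_causal_mask(attention_mask, sequence_length, target_length, dtype, cache_position, batch_size):
    # Start fully masked (min value of the dtype), then leave unmasked what
    # each query position is allowed to attend to.
    min_dtype = torch.finfo(dtype).min
    causal = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
    if sequence_length != 1:
        causal = torch.triu(causal, diagonal=1)
    # Keep masked only the key positions strictly after each cache position.
    causal = causal * (torch.arange(target_length) > cache_position.reshape(-1, 1))
    causal = causal[None, None, :, :].expand(batch_size, 1, -1, -1).clone()
    if attention_mask is not None:
        # Fold the 2D padding mask into the 4D causal mask: a position is
        # masked if it was causally masked AND marked as padding (0).
        mask_length = attention_mask.shape[-1]
        padding = causal[:, :, :, :mask_length] + attention_mask[:, None, None, :]
        causal[:, :, :, :mask_length] = causal[:, :, :, :mask_length].masked_fill(padding == 0, min_dtype)
    return causal

mask = build_causal_mask(torch.ones(2, 5), sequence_length=5, target_length=5,
                         dtype=torch.float32, cache_position=torch.arange(5), batch_size=2)
print(mask.shape)  # torch.Size([2, 1, 5, 5])
```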
925
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/modeling_blenderbot.py
transformers.models.blenderbot.modeling_blenderbot.BlenderbotScaledWordEmbedding
from typing import Callable, Optional, Union from torch import nn import torch class BlenderbotScaledWordEmbedding(nn.Embedding): """ This module overrides nn.Embedding's forward by multiplying the output with the embedding scale. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale
class BlenderbotScaledWordEmbedding(nn.Embedding): ''' This module overrides nn.Embedding's forward by multiplying the output with the embedding scale. ''' def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): pass def forward(self, input_ids: torch.Tensor): pass
3
1
3
0
3
0
1
0.5
1
4
0
0
2
1
2
2
11
2
6
4
3
3
6
4
3
1
1
0
2
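As a quick illustration of the record above: the scaled word embedding is an ordinary `nn.Embedding` whose forward output is multiplied by a constant. A minimal runnable sketch, with made-up sizes and `embed_scale = sqrt(d_model)`, which is what Blenderbot uses when `config.scale_embedding` is set:

```python
import math
import torch
from torch import nn

class ScaledWordEmbedding(nn.Embedding):
    """Embedding whose forward output is multiplied by a fixed scale."""

    def __init__(self, num_embeddings, embedding_dim, padding_idx, embed_scale=1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids):
        return super().forward(input_ids) * self.embed_scale

# Toy sizes; the real model uses vocab_size x d_model.
embed = ScaledWordEmbedding(100, 16, padding_idx=0, embed_scale=math.sqrt(16))
out = embed(torch.tensor([[1, 2, 3]]))
print(out.shape)  # torch.Size([1, 3, 16])
```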
926
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/tokenization_blenderbot.py
transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer
import os import regex as re from typing import Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer import json class BlenderbotTokenizer(PreTrainedTokenizer): """ Constructs a Blenderbot tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently depending on whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import BlenderbotTokenizer >>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") >>> tokenizer.add_prefix_space = False >>> tokenizer("Hello world")["input_ids"] [47, 921, 86, 1085, 2] >>> tokenizer(" Hello world")["input_ids"] [6950, 1085, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows the leading word to be treated just like any other word. 
(the Blenderbot tokenizer detects the beginning of words by the preceding space). """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): vocab = dict(self.encoder).copy() vocab.update(self.added_tokens_encoder) return vocab def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): """Converts a token (str) to an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) to a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) into a single string.""" text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())): text = ' ' + text return (text, kwargs) def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A Blenderbot sequence has the following format: - single sequence: ` X </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added token_ids_1 (`list[int]`, *optional*): Will be ignored Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ return token_ids_0 + [self.eos_token_id]
class BlenderbotTokenizer(PreTrainedTokenizer): ''' Constructs a Blenderbot tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently depending on whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import BlenderbotTokenizer >>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") >>> tokenizer.add_prefix_space = False >>> tokenizer("Hello world")["input_ids"] [47, 921, 86, 1085, 2] >>> tokenizer(" Hello world")["input_ids"] [6950, 1085, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows the leading word to be treated just like any other word (the Blenderbot tokenizer detects the beginning of words by the preceding space). 
''' def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs): pass @property def vocab_size(self): pass def get_vocab(self): pass def bpe(self, token): pass def _tokenize(self, text): '''Tokenize a string.''' pass def _convert_token_to_id(self, token): '''Converts a token (str) to an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) to a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) into a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None): ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format: - single sequence: ` X </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added token_ids_1 (`list[int]`, *optional*): Will be ignored Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass
15
8
17
1
12
4
3
0.71
1
11
0
0
13
9
13
102
329
50
165
67
132
117
118
44
104
9
3
3
38
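The heart of the tokenizer record above is the `bpe()` merge loop: repeatedly merge the adjacent symbol pair with the lowest merge rank until no known pair remains. The toy version below reproduces that control flow with a two-entry merge table invented for the example; the real tokenizer loads its ranks from `merges_file` and operates on byte-encoded symbols.

```python
# Made-up merge table: rank 0 merges ("h", "e"); the second entry can never
# fire because BPE only merges adjacent *symbols*, and "llo" never forms.
bpe_ranks = {("h", "e"): 0, ("he", "llo"): 1}

def get_pairs(word):
    """Set of adjacent symbol pairs in the current word tuple."""
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe(token):
    word = tuple(token)
    while len(word) > 1:
        pairs = get_pairs(word)
        # Pick the pair with the lowest rank; unknown pairs rank as infinity.
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

print(bpe("hello"))  # "he l l o"
```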
927
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py
transformers.models.blenderbot.tokenization_blenderbot_fast.BlenderbotTokenizerFast
import json from ...tokenization_utils_base import AddedToken, BatchEncoding from typing import Optional from tokenizers import processors from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_blenderbot import BlenderbotTokenizer class BlenderbotTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently depending on whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import BlenderbotTokenizerFast >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B") >>> tokenizer("Hello world")["input_ids"] [6950, 1085, 2] >>> tokenizer(" Hello world")["input_ids"] [6950, 1085, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. 
add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows the leading word to be treated just like any other word (the Blenderbot tokenizer detects the beginning of words by the preceding space). trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = BlenderbotTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs): mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs) tokenizer_component = 'post_processor' tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) if tokenizer_component_instance: state = json.loads(tokenizer_component_instance.__getstate__()) if 'sep' in state: state['sep'] = tuple(state['sep']) if 'cls' in state: state['cls'] = tuple(state['cls']) changes_to_apply = False if state.get('add_prefix_space', add_prefix_space) != add_prefix_space: state['add_prefix_space'] = add_prefix_space changes_to_apply = True if state.get('trim_offsets', trim_offsets) != trim_offsets: state['trim_offsets'] = trim_offsets changes_to_apply = True if changes_to_apply: component_class = getattr(processors, state.pop('type')) new_value = component_class(**state) setattr(self.backend_tokenizer, tokenizer_component, new_value) @property def mask_token(self) -> str: """ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. """ if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def mask_token(self, value): """ Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on Roberta. """ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value self._mask_token = value def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.' return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.' 
return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format: - single sequence: ` X </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added token_ids_1 (`list[int]`, *optional*): Will be ignored Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ return token_ids_0 + [self.eos_token_id]
class BlenderbotTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently depending on whether it is at the beginning of the sentence (without space) or not: ```python >>> from transformers import BlenderbotTokenizerFast >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B") >>> tokenizer("Hello world")["input_ids"] [6950, 1085, 2] >>> tokenizer(" Hello world")["input_ids"] [6950, 1085, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows the leading word to be treated just like any other word (the Blenderbot tokenizer detects the beginning of words by the preceding space). 
trim_offsets (`bool`, *optional*, defaults to `True`): Whether the post processing step should trim offsets to avoid including whitespaces. ''' def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs): pass @property def mask_token(self) -> str: ''' `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not having been set. Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily comprise the space before the *<mask>*. ''' pass @mask_token.setter def mask_token(self) -> str: ''' Overriding the default behavior of the mask token to have it eat the space before it. This is needed to preserve backward compatibility with all the previously used models based on Roberta. ''' pass def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: pass def _encode_plus(self, *args, **kwargs) -> BatchEncoding: pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. ''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None): ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format: - single sequence: ` X </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added token_ids_1 (`list[int]`, *optional*): Will be ignored Returns: `list[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass
11
5
18
2
12
5
2
1.04
1
5
1
0
8
1
8
96
244
42
99
44
70
103
54
24
45
8
3
2
19
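Both tokenizer records share the same two sequence-building helpers. The sketch below shows their effect with placeholder token ids (`eos=2`, `sep=2`, `cls=0` are stand-in values for this example; the real ids come from the loaded vocabulary).

```python
eos_token_id, sep_token_id, cls_token_id = 2, 2, 0  # placeholder ids

def build_inputs_with_special_tokens(token_ids_0):
    # A Blenderbot sequence is simply ` X </s>`: append eos, no bos/cls.
    return token_ids_0 + [eos_token_id]

def create_token_type_ids(token_ids_0, token_ids_1=None):
    # Blenderbot does not use token type ids, so the result is all zeros,
    # sized as if cls/sep tokens had been inserted.
    sep, cls = [sep_token_id], [cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

print(build_inputs_with_special_tokens([10, 11]))  # [10, 11, 2]
print(create_token_type_ids([10, 11], [12]))       # [0, 0, 0, 0, 0, 0, 0]
```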
928
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py
transformers.models.blenderbot_small.configuration_blenderbot_small.BlenderbotSmallConfig
from ...configuration_utils import PretrainedConfig class BlenderbotSmallConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate a BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall [facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`] or [`TFBlenderbotSmallModel`]. d_model (`int`, *optional*, defaults to 512): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 8): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 8): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by dividing by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
Example: ```python >>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel >>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration >>> configuration = BlenderbotSmallConfig() >>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration >>> model = BlenderbotSmallModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'blenderbot-small' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
class BlenderbotSmallConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate a BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall [facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`] or [`TFBlenderbotSmallModel`]. d_model (`int`, *optional*, defaults to 512): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 8): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 8): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by dividing by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
Example: ```python >>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel >>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration >>> configuration = BlenderbotSmallConfig() >>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration >>> model = BlenderbotSmallModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs): pass
2
1
57
1
56
1
1
1.05
1
1
0
0
1
19
1
1
133
11
60
51
31
63
25
24
23
1
1
0
1
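The config record above relies on `attribute_map` so that generic code can read `hidden_size` and `num_attention_heads` even though the stored fields are `d_model` and `encoder_attention_heads`. A minimal sketch of that aliasing, mimicking (not importing) the `PretrainedConfig` behavior; `TinyConfig` and its sizes are invented for the example:

```python
class TinyConfig:
    # Alias table: public attribute name -> stored attribute name.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, d_model=512, encoder_attention_heads=16):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # Called only when normal lookup fails; redirect mapped aliases.
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = TinyConfig()
print(cfg.hidden_size, cfg.num_attention_heads)  # 512 16
```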
929
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py
transformers.models.blenderbot_small.configuration_blenderbot_small.BlenderbotSmallOnnxConfig
from ...onnx.utils import compute_effective_axis_dimension from ...file_utils import is_torch_available from typing import Any from ... import PreTrainedTokenizer from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from collections import OrderedDict from collections.abc import Mapping class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: common_inputs['decoder_input_ids'] = {0: 'batch'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') elif self.task == 'causal-lm': common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} else: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})]) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair) decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair) decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch batch, encoder_seq_length = common_inputs['input_ids'].shape decoder_seq_length = common_inputs['decoder_input_ids'].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads) decoder_past_length = decoder_seq_length + 3 decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads) common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1) common_inputs['past_key_values'] = [] num_encoder_layers, num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(min_num_layers): common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape))) shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch batch, seqlen = common_inputs['input_ids'].shape past_key_values_length = seqlen + 2 num_encoder_layers, _ = self.num_layers num_encoder_attention_heads, _ = self.num_attention_heads past_shape = (batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads) mask_dtype = common_inputs['attention_mask'].dtype common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)] return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0) token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add) dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors='pt')) return common_inputs def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: if self.task in ['default', 'seq2seq-lm']: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair) elif self.task == 'causal-lm': common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ['default', 'seq2seq-lm']: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def outputs(self) -> Mapping[str, Mapping[int, str]]: pass def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: pass def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: pass def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: pass def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]: pass def _flatten_past_key_values_(self, flattened_output, name, idx, t): pass
10
0
30
2
27
1
4
0.05
1
9
0
0
7
1
7
7
221
20
191
73
151
10
89
42
79
8
1
3
28
930
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallAttention
from ...modeling_flash_attention_utils import FlashAttentionKwargs from .configuration_blenderbot_small import BlenderbotSmallConfig from ...processing_utils import Unpack from typing import Callable, Optional, Union from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel import torch from ...utils.deprecation import deprecate_kwarg from torch import nn class BlenderbotSmallAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BlenderbotSmallConfig]=None, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) is_updated = False if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(*kv_input_shape).transpose(1, 2) value_states = value_states.view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: cache_position = cache_position if not 
is_cross_attention else None key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache): past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights)
class BlenderbotSmallAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BlenderbotSmallConfig]=None, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
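Shapes are the main contract here: the projections fold `embed_dim` into `(num_heads, head_dim)` and back. A minimal standalone sketch, assuming the class and `BlenderbotSmallConfig` are in scope and the default eager attention path; all sizes are illustrative:

```python
import torch

config = BlenderbotSmallConfig(d_model=512, decoder_attention_heads=8)  # illustrative sizes
attn = BlenderbotSmallAttention(embed_dim=512, num_heads=8, dropout=0.0,
                                is_decoder=True, config=config, layer_idx=0)

hidden = torch.randn(2, 6, 512)  # (batch, time, channel), per the forward docstring
attn_out, attn_weights = attn(hidden_states=hidden, output_attentions=True)
assert attn_out.shape == (2, 6, 512)
assert attn_weights.shape == (2, 8, 6, 6)  # one (tgt_len, src_len) matrix per head
```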
4
2
50
7
35
8
5
0.24
1
7
1
0
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
931
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallDecoder
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from typing import Callable, Optional, Union from .configuration_blenderbot_small import BlenderbotSmallConfig import math from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from torch import nn import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotSmallDecoderLayer`] Args: config: BlenderbotSmallConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model) self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing`. 
Setting `use_cache=False`...') use_cache = False if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None else DynamicCache(config=self.config) if use_cache and isinstance(past_key_values, tuple): logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.') past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) batch_size, seq_length = inputs_embeds.size()[:-1] past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if attention_mask is None and (not is_torchdynamo_compiling()): mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache) encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds) position_ids = self.embed_positions((batch_size, seq_length), past_key_values_length, position_ids=cache_position) inputs_embeds = self.layernorm_embedding(inputs_embeds) hidden_states = inputs_embeds + position_ids hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer(hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, 
cross_attentions=all_cross_attentions)
class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel): ''' Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotSmallDecoderLayer`] Args: config: BlenderbotSmallConfig embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding]=None): pass def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None): ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. ''' pass
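Since the decoder builds its own causal mask, cache, and position ids, it can be exercised on its own. A small sketch with an illustrative tiny configuration (real checkpoints are much larger); `eval()` disables layerdrop so every layer runs:

```python
import torch

config = BlenderbotSmallConfig(vocab_size=100, d_model=64, decoder_layers=2,
                               decoder_attention_heads=4, decoder_ffn_dim=128,
                               max_position_embeddings=32)
decoder = BlenderbotSmallDecoder(config)
decoder.eval()

input_ids = torch.randint(1, 100, (2, 5))
out = decoder(input_ids=input_ids, use_cache=True, return_dict=True)
print(out.last_hidden_state.shape)  # torch.Size([2, 5, 64])
# out.past_key_values is a DynamicCache; feed it back with the next token (and an
# updated cache_position) for incremental decoding.
```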
3
2
60
8
36
16
11
0.49
1
10
4
0
4
10
4
6
251
38
143
43
124
70
77
29
72
37
2
3
42
932
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallDecoderLayer
from .configuration_blenderbot_small import BlenderbotSmallConfig from ...activations import ACT2FN import torch from ...utils.deprecation import deprecate_kwarg from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from typing import Callable, Optional, Union from ...modeling_layers import GradientCheckpointingLayer from torch import nn class BlenderbotSmallDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: BlenderbotSmallConfig, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = config.d_model self.self_attn = BlenderbotSmallAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config, layer_idx=layer_idx) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = BlenderbotSmallAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. 
""" residual = hidden_states hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs
class BlenderbotSmallDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: BlenderbotSmallConfig, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Cache`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. ''' pass
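The layer is a standard post-LayerNorm residual block: self-attention, optional cross-attention, then the feed-forward sublayer, each followed by dropout, a residual add, and LayerNorm. A minimal sketch with illustrative sizes; with no `encoder_hidden_states` the cross-attention block is skipped entirely:

```python
import torch

config = BlenderbotSmallConfig(d_model=64, decoder_attention_heads=4, decoder_ffn_dim=128)
layer = BlenderbotSmallDecoderLayer(config, layer_idx=0)
layer.eval()

hidden = torch.randn(2, 5, 64)
(out,) = layer(hidden)  # output_attentions=False -> a one-element tuple
assert out.shape == hidden.shape  # residual connections preserve the embedding width
```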
4
1
58
6
40
13
4
0.31
1
4
1
0
2
11
2
12
118
12
81
32
67
25
44
21
41
6
1
1
7
933
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallDecoderWrapper
class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = BlenderbotSmallDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs)
class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel): ''' This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. ''' def __init__(self, config): pass def forward(self, *args, **kwargs): pass
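A brief sketch of why the indirection matters: because the decoder lives under the attribute name `decoder`, parameter keys keep the same `decoder.` prefix as in the seq2seq model, so pretrained checkpoints load without key remapping. Sizes below are illustrative:

```python
import torch

config = BlenderbotSmallConfig(vocab_size=100, d_model=64, decoder_layers=1,
                               decoder_attention_heads=4, decoder_ffn_dim=128)
wrapper = BlenderbotSmallDecoderWrapper(config)

print(next(iter(wrapper.state_dict())))  # keys start with "decoder."
out = wrapper(input_ids=torch.randint(1, 100, (1, 4)), return_dict=True)
print(out.last_hidden_state.shape)  # torch.Size([1, 4, 64])
```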
3
1
3
0
3
0
1
0.67
1
2
1
0
2
1
2
4
12
2
6
4
3
4
6
4
3
1
2
0
2
934
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallEncoder
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from .configuration_blenderbot_small import BlenderbotSmallConfig import math from typing import Callable, Optional, Union from torch import nn import torch class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`BlenderbotSmallEncoderLayer`]. Args: config: BlenderbotSmallConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None): """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) attention_mask = self._update_full_mask(attention_mask, inputs_embeds) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel): ''' Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`BlenderbotSmallEncoderLayer`]. Args: config: BlenderbotSmallConfig embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding]=None): pass def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None): ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
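The encoder can likewise run standalone; a small sketch with illustrative sizes. `eval()` matters here because `layerdrop` randomly skips whole layers during training:

```python
import torch

config = BlenderbotSmallConfig(vocab_size=100, d_model=64, encoder_layers=2,
                               encoder_attention_heads=4, encoder_ffn_dim=128)
encoder = BlenderbotSmallEncoder(config)
encoder.eval()

input_ids = torch.randint(1, 100, (2, 7))  # avoids the pad id (0 by default)
enc = encoder(input_ids=input_ids, output_hidden_states=True, return_dict=True)
print(enc.last_hidden_state.shape)  # torch.Size([2, 7, 64])
print(len(enc.hidden_states))       # embedding output + one state per layer = 3
```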
3
2
78
12
49
19
14
0.45
1
9
4
0
2
10
2
4
167
26
98
32
86
44
62
23
59
24
2
3
27
935
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallEncoderLayer
from ...activations import ACT2FN from typing import Callable, Optional, Union from .configuration_blenderbot_small import BlenderbotSmallConfig from ...modeling_layers import GradientCheckpointingLayer import torch from torch import nn class BlenderbotSmallEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: BlenderbotSmallConfig, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = config.d_model self.self_attn = BlenderbotSmallAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, layer_idx=layer_idx) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class BlenderbotSmallEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: BlenderbotSmallConfig, layer_idx: Optional[int]=None): pass def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
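Note the float16 clamp at the end of `forward`, which guards against inf/nan overflow in half precision. A minimal shape sketch with illustrative sizes; both mask arguments may be `None` when nothing is masked:

```python
import torch

config = BlenderbotSmallConfig(d_model=64, encoder_attention_heads=4, encoder_ffn_dim=128)
layer = BlenderbotSmallEncoderLayer(config)

hidden = torch.randn(2, 7, 64)
(out,) = layer(hidden, attention_mask=None, layer_head_mask=None)
assert out.shape == hidden.shape
```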
3
1
33
3
25
6
2
0.22
1
3
1
0
2
9
2
12
68
7
50
22
41
11
32
16
29
3
1
1
4
936
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallForCausalLM
from typing import Callable, Optional, Union from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from torch import nn from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache import torch from ...generation import GenerationMixin from torch.nn import CrossEntropyLoss class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = BlenderbotSmallDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: """ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") >>> model = BlenderbotSmallForCausalLM.from_pretrained("facebook/blenderbot_small-90M", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) logits = self.lm_head(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel, GenerationMixin): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_decoder(self, decoder): pass def get_decoder(self): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: ''' cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") >>> model = BlenderbotSmallForCausalLM.from_pretrained("facebook/blenderbot_small-90M", add_cross_attention=False) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] >>> list(logits.shape) == expected_shape True ```''' pass
8
1
19
3
9
8
2
0.84
2
6
2
0
8
2
9
11
186
33
83
37
56
70
42
20
32
7
2
1
16
937
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallForConditionalGeneration
from ...generation import GenerationMixin from torch.nn import CrossEntropyLoss import torch from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from .configuration_blenderbot_small import BlenderbotSmallConfig from torch import nn from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from typing import Callable, Optional, Union @auto_docstring(custom_intro='\n The BlenderbotSmall Model with a language modeling head. Can be used for summarization.\n ') class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel, GenerationMixin): base_model_prefix = 'model' _keys_to_ignore_on_load_missing = ['final_logits_bias'] _tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: BlenderbotSmallConfig): super().__init__(config) self.model = BlenderbotSmallModel(config) self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple, BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. 
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example Conversation: ```python >>> from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration >>> mname = "facebook/blenderbot_small-90M" >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname) >>> tokenizer = AutoTokenizer.from_pretrained(mname) >>> UTTERANCE = "My friends are cool but they eat too many carbs." >>> print("Human: ", UTTERANCE) Human: My friends are cool but they eat too many carbs. >>> inputs = tokenizer([UTTERANCE], return_tensors="pt") >>> reply_ids = model.generate(**inputs) >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]) Bot: what kind of carbs do they eat? i don't know much about carbs. >>> REPLY = "I'm not sure" >>> print("Human: ", REPLY) Human: I'm not sure >>> NEXT_UTTERANCE = ( ... "My friends are cool but they eat too many carbs.__end__ __start__what kind of carbs do they eat? " ... "i don't know much about carbs__end__ " ... "__start__ I'm not sure." ... ) >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt") >>> next_reply_ids = model.generate(**inputs) >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0]) Bot: they eat a lot of carbs. carbs are high in fat, protein, and fats. 
``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.') use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
@auto_docstring(custom_intro='\n    The BlenderbotSmall Model with a language modeling head. Can be used for summarization.\n    ')
class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel, GenerationMixin):

    def __init__(self, config: BlenderbotSmallConfig):
        pass

    def get_encoder(self):
        pass

    def get_decoder(self):
        pass

    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
        pass

    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple, BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
        '''
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using
            [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
            [What are decoder input IDs?](../glossary#decoder-input-ids) BlenderbotSmall uses the `bos_token_id` as
            the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only
            the last `decoder_input_ids` have to be input (see `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
            are ignored (masked), the loss is only computed for the tokens with labels in
            `[0, ..., config.vocab_size]`.

        Example Conversation:

        ```python
        >>> from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration

        >>> mname = "facebook/blenderbot_small-90M"
        >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
        >>> tokenizer = AutoTokenizer.from_pretrained(mname)

        >>> UTTERANCE = "My friends are cool but they eat too many carbs."
        >>> print("Human: ", UTTERANCE)
        Human: My friends are cool but they eat too many carbs.

        >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
        >>> reply_ids = model.generate(**inputs)
        >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
        Bot: what kind of carbs do they eat? i don't know much about carbs.

        >>> REPLY = "I'm not sure"
        >>> print("Human: ", REPLY)
        Human: I'm not sure

        >>> NEXT_UTTERANCE = (
        ...     "My friends are cool but they eat too many carbs.__end__ __start__what kind of carbs do they eat? "
        ...     "i don't know much about carbs__end__ "
        ...     "__start__ I'm not sure."
        ... )
        >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
        >>> next_reply_ids = model.generate(**inputs)
        >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
        Bot: they eat a lot of carbs. carbs are high in fat, protein, and fats.
        ```
        '''
        pass
9
1
13
1
11
1
2
0.08
2
9
4
0
8
3
9
11
133
16
108
49
74
9
52
26
42
8
2
2
18
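When `labels` are given without `decoder_input_ids`, the forward pass above derives the decoder inputs with `shift_tokens_right`. A minimal sketch of that helper's behavior, mirroring the transformers utility (the toy ids below are illustrative):

```python
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    # Move every token one slot to the right and prepend the decoder start token.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # -100 is the loss-ignore index; replace it so all ids are valid embedding rows.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[42, 43, 2]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1))  # tensor([[ 1, 42, 43]])
```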
938
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallLearnedPositionalEmbedding
import torch
from torch import nn
from typing import Callable, Optional, Union


class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        super().__init__(num_embeddings, embedding_dim)

    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        if position_ids is None:
            bsz, seq_len = input_ids_shape[:2]
            position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device)
        return super().forward(position_ids)
class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding):
    '''
    This module learns positional embeddings up to a fixed maximum size.
    '''

    def __init__(self, num_embeddings: int, embedding_dim: int):
        pass

    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None):
        '''`input_ids_shape` is expected to be [bsz x seqlen].'''
        pass
3
2
5
0
4
1
1
0.44
1
2
0
0
2
0
2
2
15
2
9
5
6
4
7
5
4
1
1
0
2
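A minimal usage sketch for the class in this record (not part of the dataset record; the sizes are illustrative). It shows how `past_key_values_length` offsets the position ids during incremental decoding:

```python
import torch
from transformers.models.blenderbot_small.modeling_blenderbot_small import (
    BlenderbotSmallLearnedPositionalEmbedding,
)

emb = BlenderbotSmallLearnedPositionalEmbedding(num_embeddings=512, embedding_dim=16)

# At decoding step 5 only the new token's position is embedded: the arange
# starts at past_key_values_length, so the single looked-up position id is 5.
out = emb(torch.Size([2, 1]), past_key_values_length=5)
print(out.shape)  # torch.Size([1, 16]): one position id, 16-dim embedding
```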
939
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallModel
import torch
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from torch import nn
from .configuration_blenderbot_small import BlenderbotSmallConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from typing import Callable, Optional, Union


@auto_docstring
class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):
    _tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight']

    def __init__(self, config: BlenderbotSmallConfig):
        super().__init__(config)
        padding_idx, vocab_size = (config.pad_token_id, config.vocab_size)
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
        self.encoder = BlenderbotSmallEncoder(config, self.shared)
        self.decoder = BlenderbotSmallDecoder(config, self.shared)
        self.post_init()

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, value):
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    def get_encoder(self):
        return self.encoder

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple, BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        """
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using
            [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
            [What are decoder input IDs?](../glossary#decoder-input-ids) BlenderbotSmall uses the `bos_token_id` as
            the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only
            the last `decoder_input_ids` have to be input (see `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, BlenderbotSmallModel

        >>> model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

        >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
        >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")  # Batch size 1
        >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3, 512]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if encoder_outputs is None:
            encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
            encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)

        decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
@auto_docstring
class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):

    def __init__(self, config: BlenderbotSmallConfig):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    def get_encoder(self):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[tuple, BaseModelOutput]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        '''
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using
            [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
            [What are decoder input IDs?](../glossary#decoder-input-ids) BlenderbotSmall uses the `bos_token_id` as
            the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only
            the last `decoder_input_ids` have to be input (see `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, BlenderbotSmallModel

        >>> model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

        >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
        >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")  # Batch size 1
        >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3, 512]
        ```'''
        pass
8
1
19
2
14
3
3
0.2
1
8
5
0
6
3
6
8
122
18
87
31
61
17
31
13
24
10
2
1
15
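A small check of the embedding tying set up in `__init__` above. Instantiating the default config builds a randomly initialized model, so nothing is downloaded; the assertion on shared weights is implied by the `_tied_weights_keys` declaration:

```python
from transformers import BlenderbotSmallConfig, BlenderbotSmallModel

config = BlenderbotSmallConfig()  # default hyperparameters, random weights
model = BlenderbotSmallModel(config)

# Encoder and decoder token embeddings are tied to the single `shared` table.
assert model.encoder.embed_tokens.weight is model.shared.weight
assert model.decoder.embed_tokens.weight is model.shared.weight
```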
940
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
transformers.models.blenderbot_small.modeling_blenderbot_small.BlenderbotSmallPreTrainedModel
import torch
from torch import nn
from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from .configuration_blenderbot_small import BlenderbotSmallConfig
from typing import Callable, Optional, Union


@auto_docstring
class BlenderbotSmallPreTrainedModel(PreTrainedModel):
    config: BlenderbotSmallConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()

    @property
    def dummy_inputs(self):
        pad_token = self.config.pad_token_id
        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
        dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids, 'decoder_input_ids': input_ids}
        return dummy_inputs

    def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
        if attention_mask is not None:
            if 'flash' in self.config._attn_implementation:
                attention_mask = attention_mask if 0 in attention_mask else None
            elif self.config._attn_implementation == 'sdpa':
                attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
            elif self.config._attn_implementation == 'flex_attention':
                if isinstance(attention_mask, torch.Tensor):
                    attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
            else:
                attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
        return attention_mask

    def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache):
        if self.config._attn_implementation == 'flex_attention':
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            elif attention_mask is None:
                # Fixed: `attention_mask` is None in this branch, so the device must
                # come from the input tensor (the original read `attention_mask.device`,
                # which would raise an AttributeError here).
                attention_mask = make_flex_block_causal_mask(torch.ones(size=(input_tensor.shape[0], input_tensor.shape[1]), device=input_tensor.device))
            return attention_mask
        if 'flash' in self.config._attn_implementation:
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
        if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
                return None
        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
        if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']):
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static
                cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
        return causal_mask

    def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            if 'flash' in self.config._attn_implementation:
                encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
            elif self.config._attn_implementation == 'sdpa':
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
            elif self.config._attn_implementation == 'flex_attention':
                if isinstance(encoder_attention_mask, torch.Tensor):
                    encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
            else:
                encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
        return encoder_attention_mask
@auto_docstring
class BlenderbotSmallPreTrainedModel(PreTrainedModel):

    def _init_weights(self, module):
        pass

    @property
    def dummy_inputs(self):
        pass

    def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
        pass

    def _update_causal_mask(self, attention_mask: Optional[Union[torch.Tensor, 'BlockMask']], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache):
        pass

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
        '''
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static
                cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        '''
        pass

    def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
        pass
10
1
10
0
10
0
3
0
1
0
0
6
2
0
2
2
26
2
24
11
20
0
18
10
15
5
1
2
6
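The triu-plus-cache-position trick in `_prepare_4d_causal_attention_mask_with_cache_position` above can be seen in isolation. This standalone sketch reproduces just the 2D mask construction with toy sizes (3 new queries against a 5-slot cache):

```python
import torch

sequence_length, target_length = 3, 5                   # 3 new queries, 5 total cache slots
cache_position = torch.arange(2, 2 + sequence_length)   # queries sit at cache positions 2..4
min_dtype = torch.finfo(torch.float32).min

mask = torch.full((sequence_length, target_length), min_dtype)
mask = torch.triu(mask, diagonal=1)                     # block the strict upper triangle
mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
print((mask == 0).int())
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0],
#         [1, 1, 1, 1, 1]])  -> each query attends to every slot up to its own position
```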
941
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py
transformers.models.blenderbot_small.tokenization_blenderbot_small.BlenderbotSmallTokenizer
import os
import json
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from typing import Optional


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
    refer to the superclass for more information regarding methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        merges_file (`str`):
            Path to the merges file.
        bos_token (`str`, *optional*, defaults to `"__start__"`):
            The beginning of sentence token.
        eos_token (`str`, *optional*, defaults to `"__end__"`):
            The end of sentence token.
        unk_token (`str`, *optional*, defaults to `"__unk__"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to
            be this token instead.
        pad_token (`str`, *optional*, defaults to `"__null__"`):
            The token used for padding, for example when batching sequences of different lengths.
        kwargs (*optional*):
            Additional keyword arguments passed along to [`PreTrainedTokenizer`]
    """
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, bos_token='__start__', eos_token='__end__', unk_token='__unk__', pad_token='__null__', **kwargs):
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', ' \\1', token)
        token = re.sub("(')", ' \\1 ', token)
        token = re.sub('\\s{2,}', ' ', token)
        if '\n' in token:
            token = token.replace('\n', ' __newln__')
        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return ' '.join(words)

    def _tokenize(self, text: str) -> list[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall('\\S+\\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        """Converts a sequence of tokens in a single string."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merge_file)
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    '''
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
    refer to the superclass for more information regarding methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        merges_file (`str`):
            Path to the merges file.
        bos_token (`str`, *optional*, defaults to `"__start__"`):
            The beginning of sentence token.
        eos_token (`str`, *optional*, defaults to `"__end__"`):
            The end of sentence token.
        unk_token (`str`, *optional*, defaults to `"__unk__"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to
            be this token instead.
        pad_token (`str`, *optional*, defaults to `"__null__"`):
            The token used for padding, for example when batching sequences of different lengths.
        kwargs (*optional*):
            Additional keyword arguments passed along to [`PreTrainedTokenizer`]
    '''

    def __init__(self, vocab_file, merges_file, bos_token='__start__', eos_token='__end__', unk_token='__unk__', pad_token='__null__', **kwargs):
        pass

    @property
    def vocab_size(self) -> int:
        pass

    def get_vocab(self) -> dict:
        pass

    def bpe(self, token: str) -> str:
        pass

    def _tokenize(self, text: str) -> list[str]:
        '''Split a string into tokens using BPE.'''
        pass

    def _convert_token_to_id(self, token: str) -> int:
        '''Converts a token to an id using the vocab.'''
        pass

    def _convert_id_to_token(self, index: int) -> str:
        '''Converts an index (integer) in a token (str) using the vocab.'''
        pass

    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        '''Converts a sequence of tokens in a single string.'''
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass
11
5
14
1
13
1
3
0.22
1
10
0
0
9
4
9
98
167
24
118
48
98
26
99
34
89
12
3
4
26
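A short usage sketch for the tokenizer in this record. It downloads the published vocab, so the exact subword splits depend on the learned merges; the round-trip through `convert_tokens_to_string` strips the '@@ ' continuation markers:

```python
from transformers import BlenderbotSmallTokenizer

tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")

tokens = tok.tokenize("sample text\nsecond line")   # '\n' is mapped to the __newln__ token
ids = tok.convert_tokens_to_ids(tokens)
print(tok.convert_tokens_to_string(tokens))         # lowercased text with '@@ ' markers removed
```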
942
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py
transformers.models.blenderbot_small.tokenization_blenderbot_small_fast.BlenderbotSmallTokenizerFast
from typing import Optional
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
from tokenizers import ByteLevelBPETokenizer


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.
        BlenderbotSmall does not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    '''
    Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    '''

    def __init__(self, vocab_file=None, merges_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, trim_offsets=True, **kwargs):
        pass

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        pass

    def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.
        BlenderbotSmall does not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of zeros.
        '''
        pass
4
2
17
1
12
4
2
0.42
1
3
0
0
3
1
3
91
66
9
40
22
24
17
17
10
13
2
3
1
5
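The all-zeros token type mask from `create_token_type_ids_from_sequences` above, traced with toy ids (the special-token ids here are hypothetical placeholders, not the model's real ids):

```python
cls, sep = [0], [2]                    # hypothetical cls/sep ids
token_ids_0, token_ids_1 = [5, 6], [7]

mask = len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
print(mask)  # [0, 0, 0, 0, 0, 0, 0]: zeros spanning the whole pair, since token types are unused
```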
943
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/configuration_blip.py
transformers.models.blip.configuration_blip.BlipConfig
from ...configuration_utils import PretrainedConfig


class BlipConfig(PretrainedConfig):
    """
    [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to
    instantiate a BLIP model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    BLIP-base [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BlipTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BlipVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original BLIP
            implementation.
        image_text_hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden state of the image-text fusion layer.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no
            smoothing. The targets become a mixture of the original ground truth and a uniform distribution as
            described in `Rethinking the Inception Architecture for Computer Vision
            <https://huggingface.co/papers/1512.00567>`__. Default: :math:`0.0`.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import BlipConfig, BlipModel

    >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipConfig()

    >>> # Initializing a BlipModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig

    >>> # Initializing a BLIPText and BLIPVision configuration
    >>> config_text = BlipTextConfig()
    >>> config_vision = BlipVisionConfig()

    >>> config = BlipConfig.from_text_vision_configs(config_text, config_vision)
    ```"""
    model_type = 'blip'
    sub_configs = {'text_config': BlipTextConfig, 'vision_config': BlipVisionConfig}

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, label_smoothing=0.0, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
        self.label_smoothing = label_smoothing
class BlipConfig(PretrainedConfig):
    '''
    [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to
    instantiate a BLIP model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    BLIP-base [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BlipTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BlipVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original BLIP
            implementation.
        image_text_hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden state of the image-text fusion layer.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no
            smoothing. The targets become a mixture of the original ground truth and a uniform distribution as
            described in `Rethinking the Inception Architecture for Computer Vision
            <https://huggingface.co/papers/1512.00567>`__. Default: :math:`0.0`.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import BlipConfig, BlipModel

    >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipConfig()

    >>> # Initializing a BlipModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig

    >>> # Initializing a BLIPText and BLIPVision configuration
    >>> config_text = BlipTextConfig()
    >>> config_vision = BlipVisionConfig()

    >>> config = BlipConfig.from_text_vision_configs(config_text, config_vision)
    ```'''

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, label_smoothing=0.0, **kwargs):
        pass
2
1
21
4
14
3
2
1.41
1
3
2
0
1
8
2
2
97
20
32
23
19
45
22
13
19
3
1
1
4
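Beyond the docstring example, one detail of `__init__` above is worth showing: the text config's cross-attention width is copied from the vision config, so overriding the vision hidden size propagates automatically:

```python
from transformers import BlipConfig

config = BlipConfig(vision_config={"hidden_size": 512})
# __init__ copies the vision width into the text config's encoder_hidden_size.
assert config.text_config.encoder_hidden_size == config.vision_config.hidden_size == 512
```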
944
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/configuration_blip.py
transformers.models.blip.configuration_blip.BlipTextConfig
from ...configuration_utils import PretrainedConfig


class BlipTextConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`BlipTextModel`]. It is used to instantiate a
    BLIP text model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the `BlipText` used by the
    [base architectures](https://huggingface.co/Salesforce/blip-vqa-base).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30524):
            Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`BlipModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        encoder_hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers from the vision model.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        bos_token_id (`int`, *optional*, defaults to 30522):
            The id of the `beginning-of-sequence` token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the `end-of-sequence` token.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the `padding` token.
        sep_token_id (`int`, *optional*, defaults to 102):
            The id of the `separator` token.
        is_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as a decoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        label_smoothing (`float`, *optional*, defaults to 0.0):
            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no
            smoothing. The targets become a mixture of the original ground truth and a uniform distribution as
            described in `Rethinking the Inception Architecture for Computer Vision
            <https://huggingface.co/papers/1512.00567>`__. Default: :math:`0.0`.

    Example:

    ```python
    >>> from transformers import BlipTextConfig, BlipTextModel

    >>> # Initializing a BlipTextConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipTextConfig()

    >>> # Initializing a BlipTextModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'blip_text_model'
    base_config_key = 'text_config'

    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act='gelu', layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, label_smoothing=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
        self.label_smoothing = label_smoothing
class BlipTextConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`BlipTextModel`]. It is used to instantiate a
    BLIP text model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the `BlipText` used by the
    [base architectures](https://huggingface.co/Salesforce/blip-vqa-base).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30524):
            Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`BlipModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        encoder_hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers from the vision model.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        bos_token_id (`int`, *optional*, defaults to 30522):
            The id of the `beginning-of-sequence` token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the `end-of-sequence` token.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the `padding` token.
        sep_token_id (`int`, *optional*, defaults to 102):
            The id of the `separator` token.
        is_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as a decoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        label_smoothing (`float`, *optional*, defaults to 0.0):
            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no
            smoothing. The targets become a mixture of the original ground truth and a uniform distribution as
            described in `Rethinking the Inception Architecture for Computer Vision
            <https://huggingface.co/papers/1512.00567>`__. Default: :math:`0.0`.

    Example:

    ```python
    >>> from transformers import BlipTextConfig, BlipTextModel

    >>> # Initializing a BlipTextConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipTextConfig()

    >>> # Initializing a BlipTextModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act='gelu', layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, label_smoothing=0.0, **kwargs):
        pass
2
1
48
1
47
0
1
1.22
1
1
0
0
1
16
1
1
122
11
50
43
25
61
21
20
19
1
1
0
1
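A quick sketch of overriding the text-config defaults documented above (the override values are arbitrary; the vocab size falls back to its documented default):

```python
from transformers import BlipTextConfig

cfg = BlipTextConfig(num_hidden_layers=6, num_attention_heads=4)
print(cfg.num_hidden_layers, cfg.num_attention_heads, cfg.vocab_size)  # 6 4 30524
```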
945
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/configuration_blip.py
transformers.models.blip.configuration_blip.BlipVisionConfig
from ...configuration_utils import PretrainedConfig


class BlipVisionConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`BlipVisionModel`]. It is used to instantiate
    a BLIP vision model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Blip-base
    [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_size (`int`, *optional*, defaults to 384):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import BlipVisionConfig, BlipVisionModel

    >>> # Initializing a BlipVisionConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipVisionConfig()

    >>> # Initializing a BlipVisionModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'blip_vision_model'
    base_config_key = 'vision_config'

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act='gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=1e-10, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
class BlipVisionConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`BlipVisionModel`]. It is used to instantiate
    a BLIP vision model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Blip-base
    [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_size (`int`, *optional*, defaults to 384):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import BlipVisionConfig, BlipVisionModel

    >>> # Initializing a BlipVisionConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipVisionConfig()

    >>> # Initializing a BlipVisionModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act='gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=1e-10, **kwargs):
        pass
2
1
28
1
27
0
1
1.3
1
1
0
0
1
11
1
1
80
11
30
29
14
39
16
15
14
1
1
0
1
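The defaults above imply a ViT-style patch grid; a quick derivation (standard patch arithmetic, not a stored field of the config):

```python
from transformers import BlipVisionConfig

cfg = BlipVisionConfig()
patches_per_side = cfg.image_size // cfg.patch_size  # 384 // 16 = 24
print(patches_per_side ** 2)                         # 576 patches per image
```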
946
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/image_processing_blip.py
transformers.models.blip.image_processing_blip.BlipImageProcessor
from typing import Optional, Union
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
import numpy as np
from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format


class BlipImageProcessor(BaseImageProcessor):
    """
    Constructs a BLIP image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            the `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
            Can be overridden by the `resample` parameter in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can
            be overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of
            the number of channels in the image. Can be overridden by the `image_std` parameter in the
            `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the
                input image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is
                inferred from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)

    @filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255.
                If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Controls the size of the image after `resize`. The shortest edge of the image is resized to
                `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized
                image is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make
                the longest edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to
                `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to normalize the image by if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is
                inferred from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = self.fetch_images(images)
        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        images = [to_numpy_array(image) for image in images]
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
        images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        return encoded_outputs
class BlipImageProcessor(BaseImageProcessor):
    '''
    Constructs a BLIP image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            the `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can
            be overridden by the `resample` parameter in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    '''

    def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
        pass

    def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        '''
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the
                input image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        '''
        pass

    @filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
        '''
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the output image after resizing, in the format `{"height": int, "width": int}`.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to normalize the image by if `do_normalize` is set to `True`.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        '''
        pass
5
3
69
5
39
24
8
0.88
1
8
2
0
3
9
3
23
248
21
121
52
81
106
52
16
48
17
3
1
23
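A minimal usage sketch for the processor in this record, assuming `transformers` is installed; the random image is an illustrative stand-in for a real photo, and the expected output shape follows from the default `{"height": 384, "width": 384}` size:

import numpy as np
from transformers import BlipImageProcessor

# Hypothetical input: a random HWC uint8 image standing in for a real photo.
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)

processor = BlipImageProcessor()  # defaults: resize to 384x384, rescale by 1/255, CLIP-normalize
inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 384, 384)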
947
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/image_processing_blip_fast.py
transformers.models.blip.image_processing_blip_fast.BlipImageProcessorFast
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...utils import auto_docstring
from ...image_processing_utils_fast import BaseImageProcessorFast


@auto_docstring
class BlipImageProcessorFast(BaseImageProcessorFast):
    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {'height': 384, 'width': 384}
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
@auto_docstring
class BlipImageProcessorFast(BaseImageProcessorFast):
    pass
2
0
0
0
0
0
0
0.22
1
0
0
0
0
0
0
34
11
0
9
9
8
2
9
9
8
0
4
0
0
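The fast variant only declares class-level defaults; the actual resizing and normalization come from `BaseImageProcessorFast`. A small sketch, assuming a `transformers` version that exports both processors, showing that the declared defaults match the slow processor's:

from transformers import BlipImageProcessor, BlipImageProcessorFast

# Class attributes carry the defaults; instances may wrap them in helper types.
print(BlipImageProcessorFast.size)  # {'height': 384, 'width': 384}
print(BlipImageProcessorFast.image_mean == BlipImageProcessor().image_mean)  # True (OPENAI_CLIP_MEAN)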
948
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipAttention
import torch
from ...processing_utils import Unpack
from typing import Any, Optional, Union
from torch import nn
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int


class BlipAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
        self.scale = self.head_dim ** (-0.5)
        self.dropout = nn.Dropout(config.attention_dropout)
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim)
        self.projection = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
        """Input shape: Batch x Time x Channel"""
        bsz, tgt_len, embed_dim = hidden_states.size()
        mixed_qkv = self.qkv(hidden_states).reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4)
        query_states, key_states, value_states = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2])
        attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
        attention_scores = attention_scores * self.scale
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
        new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
        context_layer = context_layer.reshape(new_context_layer_shape)
        output = self.projection(context_layer)
        return (output, attention_probs)
class BlipAttention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, config):
        pass

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        pass

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
        '''Input shape: Batch x Time x Channel'''
        pass
4
2
21
5
14
2
2
0.16
1
5
0
0
3
8
3
13
67
17
43
26
34
7
31
21
27
3
1
1
6
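A self-contained sketch of the fused-QKV attention arithmetic used in the forward pass above; the dimensions (`bsz`, `heads`, etc.) are illustrative choices, not values from the source:

import torch

bsz, seq, heads, head_dim = 2, 5, 4, 8
embed_dim = heads * head_dim

hidden = torch.randn(bsz, seq, embed_dim)
qkv = torch.nn.Linear(embed_dim, 3 * embed_dim)

# One projection, then split into (q, k, v), each of shape (bsz, heads, seq, head_dim).
mixed = qkv(hidden).reshape(bsz, seq, 3, heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = mixed[0], mixed[1], mixed[2]

scores = (q @ k.transpose(-1, -2)) * head_dim ** (-0.5)  # (bsz, heads, seq, seq)
probs = scores.softmax(dim=-1)
context = (probs @ v).permute(0, 2, 1, 3).reshape(bsz, seq, embed_dim)
print(context.shape)  # torch.Size([2, 5, 32])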
949
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipEncoder
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from torch import nn
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
import torch
from ...processing_utils import Unpack
from typing import Any, Optional, Union


class BlipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`BlipEncoderLayer`].

    Args:
        config (`BlipConfig`):
            The corresponding vision configuration for the `BlipEncoder`.
    """

    def __init__(self, config: BlipConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    @auto_docstring
    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(hidden_states, attention_mask=attention_mask, **kwargs)
        return BaseModelOutput(last_hidden_state=hidden_states)
class BlipEncoder(nn.Module):
    '''
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`BlipEncoderLayer`].

    Args:
        config (`BlipConfig`):
            The corresponding vision configuration for the `BlipEncoder`.
    '''

    def __init__(self, config: BlipConfig):
        pass

    @auto_docstring
    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
        pass
4
1
37
4
24
9
7
0.52
1
9
3
0
2
3
2
12
84
11
48
18
38
25
27
11
24
12
1
2
13
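The encoder is just a sequential loop threading hidden states through a stack of layers. A minimal stand-alone sketch of that pattern; `TinyLayer` is a hypothetical stand-in for `BlipEncoderLayer`:

import torch
from torch import nn

class TinyLayer(nn.Module):
    # Stand-in for BlipEncoderLayer: any module mapping (B, T, C) -> (B, T, C).
    def __init__(self, dim):
        super().__init__()
        self.proj = nn.Linear(dim, dim)

    def forward(self, x):
        return x + self.proj(x)

layers = nn.ModuleList([TinyLayer(16) for _ in range(3)])  # cf. config.num_hidden_layers
hidden = torch.randn(2, 7, 16)
for layer in layers:  # same sequential threading as BlipEncoder.forward
    hidden = layer(hidden)
print(hidden.shape)  # torch.Size([2, 7, 16])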
950
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipEncoderLayer
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
import torch
from torch import nn
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int


class BlipEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: BlipConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = BlipAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = BlipMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, _ = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, **kwargs)
        hidden_states = hidden_states + residual
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = hidden_states + residual
        return hidden_states
class BlipEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: BlipConfig):
        pass

    @auto_docstring
    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
        pass
4
0
22
3
15
5
2
0.33
1
6
3
0
2
5
2
12
46
6
30
16
22
10
21
11
18
2
1
1
3
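The layer above uses pre-norm wiring: each sublayer sees normalized input, and the raw residual is added back afterwards. A small sketch of that wiring; the `attn` linear layer is a hypothetical stand-in for the self-attention sublayer:

import torch
from torch import nn

dim = 16
x = torch.randn(2, 7, dim)
norm1, norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn = nn.Linear(dim, dim)  # stand-in for the self-attention sublayer
mlp = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim))

# Pre-norm: normalize *before* each sublayer, add the raw residual after.
x = x + attn(norm1(x))
x = x + mlp(norm2(x))
print(x.shape)  # torch.Size([2, 7, 16])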
951
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipForConditionalGeneration
import torch
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from typing import Any, Optional, Union
from ...processing_utils import Unpack
from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
from ...generation import GenerationMixin
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int


@auto_docstring(
    custom_intro="""
    BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally
    pass `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt.
    If no text input is provided, the decoder will start generating the caption from the [BOS]
    (beginning-of-sequence) token only.
    """
)
class BlipForConditionalGeneration(BlipPreTrainedModel, GenerationMixin):
    config: BlipConfig
    _tied_weights_keys = ['text_decoder.cls.predictions.decoder.bias']
    main_input_name = 'pixel_values'

    def __init__(self, config: BlipConfig):
        super().__init__(config)
        self.vision_model = BlipVisionModel(config.vision_config)
        self.text_decoder = BlipTextLMHeadModel(config.text_config)
        self.decoder_input_ids = config.text_config.bos_token_id
        self.decoder_pad_token_id = config.text_config.pad_token_id
        self.post_init()

    def get_input_embeddings(self):
        return self.text_decoder.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.text_decoder.set_input_embeddings(value)

    @can_return_tuple
    @auto_docstring
    def forward(self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BlipForConditionalGenerationModelOutput]:
        """
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipForConditionalGeneration

        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "A picture of"

        >>> inputs = processor(images=image, text=text, return_tensors="pt")

        >>> outputs = model(**inputs)
        ```"""
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs)
        image_embeds = vision_outputs.last_hidden_state
        outputs = self.text_decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, labels=labels, reduction='mean', **kwargs)
        return BlipForConditionalGenerationModelOutput(loss=outputs.loss, logits=outputs.logits, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions)

    @torch.no_grad()
    def generate(self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
        """
        Overrides *generate* function to be able to use the model as a conditional generator

        Parameters:
            pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
                Input image to be processed
            input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
                The sequence used as a prompt for the generation.
            attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipForConditionalGeneration

        >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model.generate(**inputs)
        >>> print(processor.decode(outputs[0], skip_special_tokens=True))
        two cats sleeping on a couch
        ```
        """
        batch_size = pixel_values.shape[0]
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        image_embeds = vision_outputs[0]
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
        if isinstance(input_ids, list):
            input_ids = torch.LongTensor(input_ids)
        elif input_ids is None:
            input_ids = torch.LongTensor([[self.decoder_input_ids, self.config.text_config.eos_token_id]]).repeat(batch_size, 1).to(image_embeds.device)
        input_ids[:, 0] = self.config.text_config.bos_token_id
        attention_mask = attention_mask[:, :-1] if attention_mask is not None else None
        outputs = self.text_decoder.generate(input_ids=input_ids[:, :-1], eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, **generate_kwargs)
        return outputs
null
10
2
32
6
18
8
3
0.41
2
8
4
0
5
4
5
6
172
35
97
40
71
40
39
21
33
6
2
1
13
952
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipForConditionalGenerationModelOutput
from typing import Any, Optional, Union
import warnings
from dataclasses import dataclass
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
import torch


@dataclass
@auto_docstring(
    custom_intro="""
    Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
    last hidden states. This class also adds the loss term from the text decoder.
    """
)
class BlipForConditionalGenerationModelOutput(ModelOutput):
    """
    loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Language modeling loss from the text decoder.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
        Prediction scores of the language modeling head of the text decoder model.
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*):
        The image embeddings obtained after applying the Vision Transformer model to the input image.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
        the model at the output of each layer plus the optional initial embedding outputs.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    """

    loss: Optional[tuple[torch.FloatTensor]] = None
    logits: Optional[tuple[torch.FloatTensor]] = None
    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None

    @property
    def decoder_logits(self):
        warnings.warn('`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers. Please use the `logits` attribute to retrieve the final output instead.', FutureWarning)
        return self.logits
@dataclass
@auto_docstring(
    custom_intro="""
    Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
    last hidden states. This class also adds the loss term from the text decoder.
    """
)
class BlipForConditionalGenerationModelOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Language modeling loss from the text decoder.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
        Prediction scores of the language modeling head of the text decoder model.
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*):
        The image embeddings obtained after applying the Vision Transformer model to the input image.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
        the model at the output of each layer plus the optional initial embedding outputs.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
        the self-attention heads.
    '''

    @property
    def decoder_logits(self):
        pass
5
1
7
0
7
0
1
1.47
1
1
0
0
1
0
1
1
42
5
15
9
12
22
10
8
8
1
1
0
1
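The `decoder_logits` property above illustrates a common deprecation-alias pattern: keep the old attribute name working while warning callers toward the new one. A minimal stand-alone sketch of the same pattern (`TinyOutput` is hypothetical):

import warnings
from dataclasses import dataclass
from typing import Optional

@dataclass
class TinyOutput:
    logits: Optional[list] = None

    @property
    def decoder_logits(self):
        # Old name still works, but steers callers to the new attribute.
        warnings.warn("`decoder_logits` is deprecated; use `logits`.", FutureWarning)
        return self.logits

out = TinyOutput(logits=[0.1, 0.9])
print(out.logits)          # new name
print(out.decoder_logits)  # old name, emits a FutureWarning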
953
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipForImageTextRetrieval
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from typing import Any, Optional, Union
from torch import nn
from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from torch.nn.functional import normalize
from ...processing_utils import Unpack
import torch


@auto_docstring(
    custom_intro="""
    BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context
    of image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant
    to the image.
    """
)
class BlipForImageTextRetrieval(BlipPreTrainedModel):
    config: BlipConfig

    def __init__(self, config: BlipConfig):
        super().__init__(config)
        self.vision_model = BlipVisionModel(config.vision_config)
        self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
        self.vision_proj = nn.Linear(config.vision_config.hidden_size, config.image_text_hidden_size)
        self.text_proj = nn.Linear(config.text_config.hidden_size, config.image_text_hidden_size)
        self.itm_head = nn.Linear(config.text_config.hidden_size, 2)
        self.decoder_pad_token_id = config.text_config.pad_token_id if not hasattr(config, 'decoder_pad_token_id') else config.decoder_pad_token_id
        self.decoder_start_token_id = config.text_config.bos_token_id if not hasattr(config, 'decoder_start_token_id') else config.decoder_start_token_id
        self.post_init()

    def get_input_embeddings(self):
        return self.text_encoder.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.text_encoder.set_input_embeddings(value)

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, use_itm_head: Optional[bool]=True, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BlipTextVisionModelOutput]:
        """
        use_itm_head (`bool`, *optional*, defaults to `True`):
            Whether or not to use the image-text matching head.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipForImageTextRetrieval

        >>> model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "an image of a cat"

        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```
        """
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs)
        image_embeds = vision_outputs.last_hidden_state
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
        if use_itm_head:
            question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, **kwargs)
            question_embeds = question_embeds.last_hidden_state
            output = self.itm_head(question_embeds[:, 0, :])
        else:
            question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
            question_embeds = question_embeds.last_hidden_state
            image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
            text_feat = normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1)
            output = image_feat @ text_feat.t()
        return BlipImageTextMatchingModelOutput(itm_score=output, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, question_embeds=question_embeds)
@auto_docstring(
    custom_intro="""
    BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context
    of image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant
    to the image.
    """
)
class BlipForImageTextRetrieval(BlipPreTrainedModel):
    def __init__(self, config: BlipConfig):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, use_itm_head: Optional[bool]=True, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BlipTextVisionModelOutput]:
        '''
        use_itm_head (`bool`, *optional*, defaults to `True`):
            Whether or not to use the image-text matching head.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipForImageTextRetrieval

        >>> model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "an image of a cat"

        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```
        '''
        pass
8
1
29
5
19
5
3
0.25
1
8
5
0
4
7
4
5
125
24
81
32
64
20
36
21
31
8
2
1
13
954
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipForQuestionAnswering
from typing import Any, Optional, Union
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
from ...generation import GenerationMixin
from ...processing_utils import Unpack


@auto_docstring(
    custom_intro="""
    BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a
    text decoder. The vision encoder will encode the input image, the text encoder will encode the input question
    together with the encoding of the image, and the text decoder will output the answer to the question.
    """
)
class BlipForQuestionAnswering(BlipPreTrainedModel, GenerationMixin):
    config: BlipConfig
    _tied_weights_keys = ['text_decoder.cls.predictions.decoder.bias']

    def __init__(self, config: BlipConfig):
        super().__init__(config)
        self.vision_model = BlipVisionModel(config.vision_config)
        self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
        self.text_decoder = BlipTextLMHeadModel(config.text_config)
        self.decoder_pad_token_id = config.text_config.pad_token_id
        self.decoder_start_token_id = config.text_config.bos_token_id
        self.post_init()

    def set_input_embeddings(self, value):
        self.text_encoder.set_input_embeddings(value)

    def get_input_embeddings(self):
        return self.text_encoder.get_input_embeddings()

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BlipTextVisionModelOutput]:
        """
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipForQuestionAnswering

        >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> # training
        >>> text = "How many cats are in the picture?"
        >>> label = "2"
        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> labels = processor(text=label, return_tensors="pt").input_ids

        >>> inputs["labels"] = labels
        >>> outputs = model(**inputs)
        >>> loss = outputs.loss
        >>> loss.backward()

        >>> # inference
        >>> text = "How many cats are in the picture?"
        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> outputs = model.generate(**inputs)
        >>> print(processor.decode(outputs[0], skip_special_tokens=True))
        2
        ```"""
        if labels is None and decoder_input_ids is None:
            raise ValueError('Either `decoder_input_ids` or `labels` should be passed when calling `forward` with `BlipForQuestionAnswering`. If you are training the model make sure that `labels` is passed, if you are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`')
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs)
        image_embeds = vision_outputs.last_hidden_state
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
        question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, **kwargs)
        if labels is not None and decoder_input_ids is None:
            # Labels double as decoder inputs during training (teacher forcing).
            decoder_input_ids = labels
        question_embeds = question_embeds[0]
        answer_output = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=question_embeds, encoder_attention_mask=attention_mask, labels=labels, reduction='mean', **kwargs)
        if labels is not None:
            decoder_loss = answer_output.loss.mean()
        else:
            decoder_loss = None
        return BlipTextVisionModelOutput(loss=decoder_loss, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions)

    @torch.no_grad()
    def generate(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
        """
        Overrides *generate* function to be able to use the model as a conditional generator

        Parameters:
            input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*):
                The sequence used as a prompt for the generation.
            pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
                Input image to be processed
            attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1`
                for tokens that are NOT MASKED, `0` for MASKED tokens.
            **generate_kwargs:
                Additional arguments passed to the *generate* function of the decoder

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipForQuestionAnswering

        >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "How many cats are in the picture?"

        >>> inputs = processor(images=image, text=text, return_tensors="pt")

        >>> outputs = model.generate(**inputs)
        >>> print(processor.decode(outputs[0], skip_special_tokens=True))
        2
        ```
        """
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        image_embeds = vision_outputs[0]
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
        if isinstance(input_ids, list):
            input_ids = torch.LongTensor(input_ids)
        question_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=False)
        question_embeds = question_outputs[0]
        question_attention_mask = torch.ones(question_embeds.size()[:-1], dtype=torch.long, device=question_embeds.device)
        bos_ids = torch.full((question_embeds.size(0), 1), fill_value=self.decoder_start_token_id, device=question_embeds.device)
        outputs = self.text_decoder.generate(input_ids=bos_ids, eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, encoder_hidden_states=question_embeds, encoder_attention_mask=question_attention_mask, **generate_kwargs)
        return outputs
@auto_docstring(
    custom_intro="""
    BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a
    text decoder. The vision encoder will encode the input image, the text encoder will encode the input question
    together with the encoding of the image, and the text decoder will output the answer to the question.
    """
)
class BlipForQuestionAnswering(BlipPreTrainedModel, GenerationMixin):
    def __init__(self, config: BlipConfig):
        pass

    def set_input_embeddings(self, value):
        pass

    def get_input_embeddings(self):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BlipTextVisionModelOutput]:
        '''
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipForQuestionAnswering

        >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> # training
        >>> text = "How many cats are in the picture?"
        >>> label = "2"
        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> labels = processor(text=label, return_tensors="pt").input_ids

        >>> inputs["labels"] = labels
        >>> outputs = model(**inputs)
        >>> loss = outputs.loss
        >>> loss.backward()

        >>> # inference
        >>> text = "How many cats are in the picture?"
        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> outputs = model.generate(**inputs)
        >>> print(processor.decode(outputs[0], skip_special_tokens=True))
        2
        ```'''
        pass

    @torch.no_grad()
    def generate(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
        '''
        Overrides *generate* function to be able to use the model as a conditional generator

        Parameters:
            input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*):
                The sequence used as a prompt for the generation.
            pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
                Input image to be processed
            attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1`
                for tokens that are NOT MASKED, `0` for MASKED tokens.
            **generate_kwargs:
                Additional arguments passed to the *generate* function of the decoder

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipForQuestionAnswering

        >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "How many cats are in the picture?"

        >>> inputs = processor(images=image, text=text, return_tensors="pt")

        >>> outputs = model.generate(**inputs)
        >>> print(processor.decode(outputs[0], skip_special_tokens=True))
        2
        ```
        '''
        pass
10
2
42
8
23
12
3
0.48
1
10
5
0
5
5
5
6
222
43
121
49
93
58
48
28
42
10
2
1
15
955
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipMLP
from torch import nn
import torch
from ...activations import ACT2FN


class BlipMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
class BlipMLP(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
6
0
1
0
1
2
0
0
2
4
2
12
13
1
12
7
9
0
12
7
9
1
1
0
2
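The MLP above expands the hidden dimension, applies the activation, then projects back. A shape-level sketch of that expand-then-contract flow; the sizes are illustrative, not the model's actual config values:

import torch
from torch import nn

hidden_size, intermediate_size = 16, 64  # illustrative; real configs are much larger
mlp = nn.Sequential(
    nn.Linear(hidden_size, intermediate_size),  # fc1: expand
    nn.GELU(),                                  # activation_fn stand-in
    nn.Linear(intermediate_size, hidden_size),  # fc2: project back
)
x = torch.randn(2, 7, hidden_size)
print(mlp(x).shape)  # torch.Size([2, 7, 16])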
956
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipModel
import torch
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from ...processing_utils import Unpack
from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
from typing import Any, Optional, Union

logger = logging.get_logger(__name__)


@auto_docstring(
    custom_intro="""
    This model is going to be deprecated in future versions. Please use `BlipForConditionalGeneration`,
    `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase.
    """
)
class BlipModel(BlipPreTrainedModel):
    config: BlipConfig

    def __init__(self, config: BlipConfig):
        super().__init__(config)
        if not isinstance(config.text_config, BlipTextConfig):
            raise TypeError(f'config.text_config is expected to be of type BlipTextConfig but is of type {type(config.text_config)}.')
        if not isinstance(config.vision_config, BlipVisionConfig):
            raise TypeError(f'config.vision_config is expected to be of type BlipVisionConfig but is of type {type(config.vision_config)}.')
        text_config = config.text_config
        vision_config = config.vision_config
        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size
        self.text_model = BlipTextModel(text_config)
        self.vision_model = BlipVisionModel(vision_config)
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
        logger.warning('`BlipModel` is going to be deprecated in future release, please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase.')
        self.post_init()

    def get_input_embeddings(self):
        return self.text_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.text_model.set_input_embeddings(value)

    @auto_docstring
    def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor:
        """
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The text embeddings obtained by applying the projection layer to the pooled output of
                [`BlipTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoProcessor, BlipModel

        >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)
        return text_features

    @auto_docstring
    def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        """
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The image embeddings obtained by applying the projection layer to the pooled output of
                [`BlipVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipModel

        >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        pooled_output = vision_outputs[1]
        image_features = self.visual_projection(pooled_output)
        return image_features

    @auto_docstring
    def get_multimodal_features(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        """
        Returns:
            multimodal_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The multimodal embeddings obtained by applying the image embeddings to the text encoder using the
                cross-attention mechanism.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipModel

        >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> texts = ["a photo of a cat", "a photo of a dog"]

        >>> inputs = processor(images=image, text=texts, padding=True, return_tensors="pt")

        >>> multimodal_features = model.get_multimodal_features(**inputs)
        ```"""
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        image_embeds = vision_outputs[0]
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts)
        pooled_output = text_outputs[1]
        multimodal_features = self.text_projection(pooled_output)
        return multimodal_features

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BlipOutput]:
        """
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipModel

        >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs)
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, **kwargs)
        image_embeds = vision_outputs.pooler_output
        image_embeds = self.visual_projection(image_embeds)
        text_embeds = text_outputs.pooler_output
        text_embeds = self.text_projection(text_embeds)
        # Normalize features, then score with a learned-temperature dot product.
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
        logit_scale = self.logit_scale.exp().to(device=text_embeds.device)
        image_embeds = image_embeds.to(device=text_embeds.device, dtype=text_embeds.dtype)
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()
        loss = None
        if return_loss:
            loss = blip_loss(logits_per_text)
        return BlipOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
@auto_docstring(
    custom_intro="""
    This model is going to be deprecated in future versions. Please use `BlipForConditionalGeneration`,
    `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase.
    """
)
class BlipModel(BlipPreTrainedModel):
    def __init__(self, config: BlipConfig):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    @auto_docstring
    def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor:
        '''
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The text embeddings obtained by applying the projection layer to the pooled output of
                [`BlipTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoProcessor, BlipModel

        >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```'''
        pass

    @auto_docstring
    def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        '''
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The image embeddings obtained by applying the projection layer to the pooled output of
                [`BlipVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipModel

        >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```'''
        pass

    @auto_docstring
    def get_multimodal_features(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        '''
        Returns:
            multimodal_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The multimodal embeddings obtained by applying the image embeddings to the text encoder using the
                cross-attention mechanism.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipModel

        >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> texts = ["a photo of a cat", "a photo of a dog"]

        >>> inputs = processor(images=image, text=texts, padding=True, return_tensors="pt")

        >>> multimodal_features = model.get_multimodal_features(**inputs)
        ```'''
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BlipOutput]:
        '''
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, BlipModel

        >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
        >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```'''
        pass
14
4
37
7
21
10
3
0.45
1
11
6
0
7
8
7
8
275
54
154
73
112
69
68
40
60
7
2
1
18
957
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipOutput
import torch
from typing import Any, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from dataclasses import dataclass
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int

@dataclass
@auto_docstring
class BlipOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
    text_model_output (`BaseModelOutputWithPooling`):
        The output of the [`BlipTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`BlipVisionModel`].
    """
    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
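The `to_tuple` override exists so that the nested `text_model_output` / `vision_model_output` objects are themselves flattened when the output is converted to a tuple. A minimal, self-contained toy sketch of that recursion pattern (toy classes of our own, not the transformers ones):

```python
from dataclasses import dataclass, fields

@dataclass
class ToyInner:
    a: int = 1
    b: int = 2

    def to_tuple(self):
        return (self.a, self.b)

@dataclass
class ToyOuter:
    loss: float = 0.0
    inner: ToyInner = None

    def to_tuple(self):
        # Recurse into fields that carry their own to_tuple, mirroring how
        # BlipOutput expands text_model_output / vision_model_output.
        return tuple(
            getattr(self, f.name).to_tuple() if hasattr(getattr(self, f.name), 'to_tuple') else getattr(self, f.name)
            for f in fields(self)
        )

print(ToyOuter(loss=0.5, inner=ToyInner()).to_tuple())  # (0.5, (1, 2))
```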
@dataclass
@auto_docstring
class BlipOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
    text_model_output (`BaseModelOutputWithPooling`):
        The output of the [`BlipTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`BlipVisionModel`].
    '''
    def to_tuple(self) -> tuple[Any]:
        pass
4
1
5
0
5
0
2
1.46
1
2
0
0
1
0
1
1
34
2
13
9
11
19
10
9
8
2
1
0
2
958
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipPreTrainedModel
from torch import nn
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from ...modeling_utils import PreTrainedModel
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig

@auto_docstring
class BlipPreTrainedModel(PreTrainedModel):
    config: BlipConfig
    base_model_prefix = 'blip'
    supports_gradient_checkpointing = True
    _no_split_modules = ['BlipEncoderLayer', 'BlipTextEmbeddings']
    _skip_keys_device_placement = ['past_key_values']

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_range
        if isinstance(module, (nn.Conv2d, nn.Embedding, nn.Linear)):
            module.weight.data.normal_(mean=0.0, std=factor)
            if hasattr(module, 'bias') and module.bias is not None:
                module.bias.data.zero_()
        if isinstance(module, BlipVisionEmbeddings):
            if hasattr(self.config, 'vision_config'):
                factor = self.config.vision_config.initializer_range
            nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
            nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
@auto_docstring
class BlipPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
3
1
28
3
24
1
7
0.17
1
1
1
5
1
0
1
1
40
5
30
8
28
5
20
8
18
7
1
2
7
959
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipTextEmbeddings
import torch
from typing import Any, Optional, Union
from torch import nn
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig

class BlipTextEmbeddings(nn.Module):
    def __init__(self, config: BlipTextConfig):
        super().__init__()
        embed_dim = config.hidden_size
        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)

    def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
        max_position_embedding = self.position_embedding.weight.shape[0]
        if seq_length > max_position_embedding:
            raise ValueError(f'Sequence length must be less than max_position_embeddings (got `sequence length`: {seq_length} and max_position_embeddings: {max_position_embedding})')
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)
        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings
        return embeddings
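The forward pass here is just the sum of a token-table lookup and a position-table lookup. A runnable toy sketch with made-up sizes (not BLIP's real configuration values):

```python
import torch
from torch import nn

vocab_size, max_pos, dim = 100, 16, 8  # toy sizes
token_emb = nn.Embedding(vocab_size, dim)
pos_emb = nn.Embedding(max_pos, dim)

input_ids = torch.tensor([[5, 7, 9]])                      # (batch, seq)
position_ids = torch.arange(input_ids.shape[-1])[None, :]  # [[0, 1, 2]]
embeddings = token_emb(input_ids) + pos_emb(position_ids)  # (1, 3, 8)
```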
class BlipTextEmbeddings(nn.Module):
    def __init__(self, config: BlipTextConfig):
        pass
    def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
        pass
3
0
18
4
14
1
3
0.03
1
4
1
0
2
2
2
12
38
8
29
15
21
1
19
10
16
5
1
1
6
960
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipVisionEmbeddings
from torch import nn
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
import torch

class BlipVisionEmbeddings(nn.Module):
    def __init__(self, config: BlipVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size
        self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
        self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size)
        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embedding.shape[1] - 1
        if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
            return self.position_embedding
        class_pos_embed = self.position_embedding[:, :1]
        patch_pos_embed = self.position_embedding[:, 1:]
        dim = embeddings.shape[-1]
        new_height = height // self.patch_size
        new_width = width // self.patch_size
        sqrt_num_positions = torch_int(num_positions ** 0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
        patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
        else:
            position_embedding = self.position_embedding
        embeddings = embeddings + position_embedding[:, :embeddings.size(1), :].to(target_dtype)
        return embeddings
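The core of `interpolate_pos_encoding` is reshaping the flat patch-position table back to its 2-D grid, resizing it bicubically, and flattening it again. A toy-sized sketch of exactly that resize step (sizes are ours, chosen for illustration):

```python
import torch
from torch import nn

dim, old_grid, new_grid = 8, 7, 9  # toy sizes: a 7x7 patch grid resized to 9x9
patch_pos_embed = torch.randn(1, old_grid * old_grid, dim)

x = patch_pos_embed.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)  # (1, dim, 7, 7)
x = nn.functional.interpolate(x, size=(new_grid, new_grid), mode='bicubic', align_corners=False)
resized = x.permute(0, 2, 3, 1).view(1, -1, dim)  # (1, 81, dim)
```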
class BlipVisionEmbeddings(nn.Module):
    def __init__(self, config: BlipVisionConfig):
        pass
    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        '''
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        '''
        pass
    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
        pass
4
1
23
5
16
3
2
0.19
1
5
1
0
3
9
3
13
72
16
48
27
44
9
40
27
36
2
1
1
5
961
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip.py
transformers.models.blip.modeling_blip.BlipVisionModel
from ...utils.generic import check_model_inputs
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
import torch
from ...processing_utils import Unpack
from typing import Any, Optional, Union
from torch import nn

class BlipVisionModel(BlipPreTrainedModel):
    main_input_name = 'pixel_values'
    config: BlipVisionConfig
    _can_record_outputs = {'hidden_states': BlipEncoderLayer, 'attentions': BlipAttention}

    def __init__(self, config: BlipVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = BlipVisionEmbeddings(config)
        self.encoder = BlipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        encoder_outputs: BaseModelOutput = self.encoder(inputs_embeds=hidden_states, **kwargs)
        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)
        return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output)

    def get_input_embeddings(self):
        return self.embeddings
class BlipVisionModel(BlipPreTrainedModel):
    def __init__(self, config: BlipVisionConfig):
        pass
    @check_model_inputs
    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
        pass
    def get_input_embeddings(self):
        pass
6
0
19
3
15
1
3
0.06
1
7
4
0
3
4
3
4
65
13
49
23
36
3
28
15
24
6
2
1
8
962
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextAttention
from torch import Tensor, device, nn
from typing import Optional, Union
import torch
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer

class BlipTextAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()
        self.self = BlipTextSelfAttention(config, is_cross_attention, layer_idx=layer_idx)
        self.output = BlipTextSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs
class BlipTextAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        pass
    def prune_heads(self, heads):
        pass
    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
        pass
5
0
15
1
13
1
1
0.07
1
6
2
0
3
3
3
13
47
4
41
20
28
3
22
11
18
2
1
1
4
963
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextEmbeddings
from typing import Optional, Union
from torch import Tensor, device, nn
import torch

class BlipTextEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
        self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
        self.config = config

    def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        embeddings = inputs_embeds
        if self.position_embedding_type == 'absolute':
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BlipTextEmbeddings(nn.Module):
    '''Construct the embeddings from word and position embeddings.'''
    def __init__(self, config):
        pass
    def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
        pass
3
1
23
4
17
2
3
0.11
1
3
0
0
2
6
2
12
49
10
35
19
26
4
26
13
23
5
1
1
6
964
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextEncoder
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from torch import Tensor, device, nn
from typing import Optional, Union

class BlipTextEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BlipTextLayer(config, i) for i in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
                use_cache = False
        if use_cache:
            if isinstance(past_key_values, tuple):
                logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
                past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
            elif isinstance(past_key_values, DynamicCache):
                past_key_values = EncoderDecoderCache(past_key_values, DynamicCache(config=self.config))
            elif past_key_values is None:
                past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions, cache_position)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
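When `output_hidden_states` is set, the loop above records the tensor entering each layer and then the final output, so an encoder with N layers yields N+1 hidden states. A schematic stand-in (plain `nn.Linear` layers of our own in place of `BlipTextLayer`):

```python
import torch
from torch import nn

layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])  # stand-in for BlipTextLayer
hidden_states = torch.randn(1, 2, 4)

all_hidden_states = ()
for layer in layers:
    all_hidden_states = all_hidden_states + (hidden_states,)  # the state *entering* this layer
    hidden_states = layer(hidden_states)
all_hidden_states = all_hidden_states + (hidden_states,)      # final output

assert len(all_hidden_states) == len(layers) + 1
```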
class BlipTextEncoder(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        pass
3
0
45
4
41
0
9
0
1
7
2
0
2
3
2
12
91
8
83
27
68
0
35
15
32
16
1
2
17
965
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextIntermediate
import torch
from torch import Tensor, device, nn
from ...activations import ACT2FN

class BlipTextIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class BlipTextIntermediate(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
966
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextLMHeadModel
from typing import Optional, Union
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions
import torch
from torch.nn import CrossEntropyLoss
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache

class BlipTextLMHeadModel(BlipTextPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias']

    def __init__(self, config):
        super().__init__(config)
        self.bert = BlipTextModel(config, add_pooling_layer=False)
        self.cls = BlipTextOnlyMLMHead(config)
        self.label_smoothing = config.label_smoothing

    def get_input_embeddings(self):
        return self.bert.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.bert.set_input_embeddings(new_embeddings)

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, return_logits: Optional[bool]=False, is_decoder: Optional[bool]=True, reduction: Optional[str]='mean', cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        """
        encoder_hidden_states (`torch.FloatTensor`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        past_key_values (`Cache`, *optional*):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False
        outputs = self.bert(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, cache_position=cache_position)
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        if return_logits:
            return prediction_scores[:, :-1, :].contiguous()
        lm_loss = None
        if labels is not None:
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous().to(shifted_prediction_scores.device)
            loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=self.label_smoothing)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if reduction == 'none':
                lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return (lm_loss,) + output if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
        model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, **model_kwargs)
        model_inputs['is_decoder'] = True
        return model_inputs
class BlipTextLMHeadModel(BlipTextPreTrainedModel, GenerationMixin):
    def __init__(self, config):
        pass
    def get_input_embeddings(self):
        pass
    def set_input_embeddings(self, new_embeddings):
        pass
    def get_output_embeddings(self):
        pass
    def set_output_embeddings(self, new_embeddings):
        pass
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, return_logits: Optional[bool]=False, is_decoder: Optional[bool]=True, reduction: Optional[str]='mean', cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        '''
        encoder_hidden_states (`torch.FloatTensor`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        past_key_values (`Cache`, *optional*):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        '''
        pass
    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
        pass
8
1
18
2
13
4
2
0.28
2
8
3
0
8
3
8
9
148
19
101
42
74
28
52
24
43
8
2
2
19
967
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextLMPredictionHead
from torch import Tensor, device, nn
import torch

class BlipTextLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = BlipTextPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def _tie_weights(self):
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
class BlipTextLMPredictionHead(nn.Module):
    def __init__(self, config):
        pass
    def _tie_weights(self):
        pass
    def forward(self, hidden_states):
        pass
4
0
6
1
4
1
1
0.23
1
2
1
0
3
3
3
13
21
5
13
7
9
3
13
7
9
1
1
0
3
968
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextLayer
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils.deprecation import deprecate_kwarg
import torch
from ...modeling_layers import GradientCheckpointingLayer
from typing import Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache

class BlipTextLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BlipTextAttention(config, layer_idx=layer_num)
        self.layer_num = layer_num
        if self.config.is_decoder:
            self.crossattention = BlipTextAttention(config, is_cross_attention=self.config.is_decoder, layer_idx=layer_num)
        self.intermediate = BlipTextIntermediate(config)
        self.output = BlipTextOutput(config)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
        self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, past_key_values=past_key_values, cache_position=cache_position)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        if encoder_hidden_states is not None:
            cross_attention_outputs = self.crossattention(attention_output, attention_mask=encoder_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:]
        layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
        return (layer_output,) + outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
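`apply_chunking_to_forward` runs the feed-forward over slices of the sequence dimension to cap peak memory; because the MLP acts position-wise, concatenating the chunk outputs reproduces the unchunked result. A simplified stand-in for the helper (our own sketch, not the transformers implementation):

```python
import torch
from torch import nn

ffn = nn.Linear(4, 4)                    # stand-in for the feed-forward chunk
attention_output = torch.randn(1, 8, 4)  # (batch, seq, hidden)

def chunked_forward(fn, chunk_size, seq_dim, x):
    if chunk_size == 0:  # chunking disabled: single full-width call
        return fn(x)
    chunks = x.split(chunk_size, dim=seq_dim)
    return torch.cat([fn(c) for c in chunks], dim=seq_dim)

out = chunked_forward(ffn, chunk_size=2, seq_dim=1, x=attention_output)
assert torch.allclose(out, ffn(attention_output), atol=1e-6)
```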
class BlipTextLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_num):
        pass
    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
        pass
    def feed_forward_chunk(self, attention_output):
        pass
5
0
19
1
18
1
2
0.04
1
6
3
0
3
8
3
13
61
6
54
30
41
2
30
21
26
3
1
1
6
969
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextModel
from torch import Tensor, device, nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions
from typing import Optional, Union

class BlipTextModel(BlipTextPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob
    Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder, the model
    needs to be initialized with the `is_decoder` argument set to `True`; an `encoder_hidden_states` is then expected
    as an input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = BlipTextEmbeddings(config)
        self.encoder = BlipTextEncoder(config)
        self.pooler = BlipTextPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (`tuple[int]`):
                The shape of the input to the model.
            device (`torch.device`):
                The device of the input to the model.

        Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
        """
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            if is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                causal_mask = causal_mask.to(attention_mask.dtype)
                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat([torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask], axis=-1)
                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})')
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, is_decoder: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        """
        encoder_hidden_states (`torch.FloatTensor`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`Cache`, *optional*):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds or encoder_embeds')
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[-2] if not isinstance(past_key_values, Cache) else past_key_values.get_seq_length()
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length)).to(device)
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device, is_decoder)
        if encoder_hidden_states is not None:
            if isinstance(encoder_hidden_states, list):
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if isinstance(encoder_attention_mask, list):
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        if encoder_embeds is None:
            embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
        else:
            embedding_output = encoder_embeds
        encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
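The decoder branch of `get_extended_attention_mask` combines a lower-triangular causal mask with the padding mask and converts the result to an additive bias. A toy-sized sketch of that construction:

```python
import torch

batch_size, seq_length = 1, 4
attention_mask = torch.tensor([[1, 1, 1, 0]])  # last position is padding

seq_ids = torch.arange(seq_length)
# causal[b, i, j] is 1 where key position j is at or before query position i
causal = (seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]).float()
extended = causal[:, None, :, :] * attention_mask[:, None, None, :].float()
extended = (1.0 - extended) * -10000.0  # additive mask: 0 keeps a position, -10000 blocks it
```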
class BlipTextModel(BlipTextPreTrainedModel):
    '''
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob
    Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder, the model
    needs to be initialized with the `is_decoder` argument set to `True`; an `encoder_hidden_states` is then expected
    as an input to the forward pass.
    '''
    def __init__(self, config, add_pooling_layer=True):
        pass
    def get_input_embeddings(self):
        pass
    def set_input_embeddings(self, value):
        pass
    def _prune_heads(self, heads_to_prune):
        '''
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        '''
        pass
    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: tuple[int], device: device, is_decoder: bool) -> Tensor:
        '''
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (`tuple[int]`):
                The shape of the input to the model.
            device (`torch.device`):
                The device of the input to the model.

        Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
        '''
        pass
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, is_decoder: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        '''
        encoder_hidden_states (`torch.FloatTensor`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`Cache`, *optional*):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        '''
        pass
7
4
37
3
24
10
5
0.44
1
10
4
0
6
4
6
7
237
26
147
47
122
65
77
29
70
19
2
3
30
970
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextOnlyMLMHead
import torch
from torch import Tensor, device, nn

class BlipTextOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = BlipTextLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores
class BlipTextOnlyMLMHead(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        pass
3
0
3
0
3
0
1
0
1
3
1
0
2
1
2
12
8
1
7
5
4
0
7
5
4
1
1
0
2
971
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextOutput
import torch
from torch import Tensor, device, nn

class BlipTextOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class BlipTextOutput(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
972
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextPooler
import torch
from torch import Tensor, device, nn

class BlipTextPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
class BlipTextPooler(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
5
1
1
0.2
1
2
0
0
2
2
2
12
13
1
10
7
7
2
10
7
7
1
1
0
2
973
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextPreTrainedModel
from torch import Tensor, device, nn
from .configuration_blip import BlipTextConfig
from ...modeling_utils import PreTrainedModel

class BlipTextPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config: BlipTextConfig
    base_model_prefix = 'bert'
    _no_split_modules = []

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
class BlipTextPreTrainedModel(PreTrainedModel):
    '''
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    '''
    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
2
2
11
0
8
3
4
0.58
1
0
0
2
1
0
1
1
21
2
12
5
10
7
11
5
9
4
1
1
4
974
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextPredictionHeadTransform
from ...activations import ACT2FN
from torch import Tensor, device, nn
import torch

class BlipTextPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
class BlipTextPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
7
0
7
0
2
0
1
3
0
0
2
3
2
12
15
1
14
6
11
0
13
6
10
2
1
1
3
975
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextSelfAttention
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
from torch import Tensor, device, nn
from ...utils.deprecation import deprecate_kwarg
import math
from typing import Optional, Union

class BlipTextSelfAttention(nn.Module):
    def __init__(self, config, is_cross_attention, layer_idx=None):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.layer_idx = layer_idx
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
            self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
        batch_size, seq_length, _ = hidden_states.shape
        query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        is_cross_attention = encoder_hidden_states is not None
        attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
        is_updated = False
        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    curr_past_key_value = past_key_values.cross_attention_cache
                else:
                    curr_past_key_value = past_key_values.self_attention_cache
            else:
                curr_past_key_value = past_key_values
        current_states = encoder_hidden_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            key_layer = curr_past_key_value.layers[self.layer_idx].keys
            value_layer = curr_past_key_value.layers[self.layer_idx].values
        else:
            key_layer = self.key(current_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
            value_layer = self.value(current_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
            if past_key_values is not None:
                cache_position = cache_position if not is_cross_attention else None
                key_layer, value_layer = curr_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
                if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
                    past_key_values.is_updated[self.layer_idx] = True
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
            if self.position_embedding_type == 'relative_key':
                relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == 'relative_key_query':
                relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask.to(attention_scores.device)
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs_dropped = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask
        context_layer = torch.matmul(attention_probs_dropped, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return (context_layer, attention_probs)
class BlipTextSelfAttention(nn.Module):

    def __init__(self, config, is_cross_attention, layer_idx=None):
        pass

    def save_attn_gradients(self, attn_gradients):
        pass

    def get_attn_gradients(self):
        pass

    def save_attention_map(self, attention_map):
        pass

    def get_attention_map(self):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
        pass
8
0
17
3
13
1
3
0.11
1
5
0
0
7
13
7
17
124
24
91
50
74
10
75
41
67
9
1
2
18
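The multi-head split in `BlipTextSelfAttention.forward` above is the standard `view` + `transpose` reshuffle. A minimal, dependency-light sketch of just that tensor movement, with hypothetical sizes chosen only for illustration:

import torch

# Hypothetical sizes for illustration only.
batch_size, seq_length, hidden_size, num_heads = 2, 5, 768, 12
head_size = hidden_size // num_heads  # 64

hidden_states = torch.randn(batch_size, seq_length, hidden_size)

# (batch, seq, hidden) -> (batch, heads, seq, head_size), as done for query/key/value in forward().
query_layer = hidden_states.view(batch_size, -1, num_heads, head_size).transpose(1, 2)
print(query_layer.shape)  # torch.Size([2, 12, 5, 64])

# Attention scores are then computed per head: (batch, heads, seq, seq).
scores = torch.matmul(query_layer, query_layer.transpose(-1, -2))
print(scores.shape)  # torch.Size([2, 12, 5, 5])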
976
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/modeling_blip_text.py
transformers.models.blip.modeling_blip_text.BlipTextSelfOutput
from torch import Tensor, device, nn
import torch

class BlipTextSelfOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class BlipTextSelfOutput(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
977
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/processing_blip.py
transformers.models.blip.processing_blip.BlipProcessor
from typing import Optional, Union
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput
from ...image_utils import ImageInput

class BlipProcessor(ProcessorMixin):
    """
    Constructs a BLIP processor which wraps a BERT tokenizer and BLIP image processor into a single processor.

    [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`BertTokenizerFast`]. See the
    docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.

    Args:
        image_processor (`BlipImageProcessor`):
            An instance of [`BlipImageProcessor`]. The image processor is a required input.
        tokenizer (`BertTokenizerFast`):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
    """
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = ('BlipImageProcessor', 'BlipImageProcessorFast')
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor, tokenizer, **kwargs):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[BlipProcessorKwargs]) -> BatchEncoding:
        """
        This method uses the [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
        [`BertTokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information.

        Args:
            images (`ImageInput`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
        """
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        text_encoding = None
        output_kwargs = self._merge_kwargs(BlipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
        if text is not None:
            text_encoding = self.tokenizer(text, **output_kwargs['text_kwargs'])
        if images is not None:
            encoding_image_processor = self.image_processor(images, **output_kwargs['images_kwargs'])
            if text_encoding is not None:
                encoding_image_processor.update(text_encoding)
            return encoding_image_processor
        return text_encoding

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        tokenizer_input_names = [name for name in tokenizer_input_names if name != 'token_type_ids']
        return tokenizer_input_names + image_processor_input_names
class BlipProcessor(ProcessorMixin):
    '''
    Constructs a BLIP processor which wraps a BERT tokenizer and BLIP image processor into a single processor.

    [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`BertTokenizerFast`]. See the
    docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.

    Args:
        image_processor (`BlipImageProcessor`):
            An instance of [`BlipImageProcessor`]. The image processor is a required input.
        tokenizer (`BertTokenizerFast`):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
    '''

    def __init__(self, image_processor, tokenizer, **kwargs):
        pass

    def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[BlipProcessorKwargs]) -> BatchEncoding:
        '''
        This method uses the [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
        [`BertTokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information.

        Args:
            images (`ImageInput`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
        '''
        pass

    @property
    def model_input_names(self):
        pass
5
2
14
1
7
6
2
0.93
1
7
2
0
5
1
5
22
94
13
42
24
28
39
30
16
24
5
2
2
9
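As a usage note for the `BlipProcessor` record above: a minimal sketch of calling the processor on an image plus a text prompt. The checkpoint name is an assumption (any BLIP checkpoint on the Hub that ships this processor should behave the same), and the image is synthesized so the snippet stays self-contained:

from PIL import Image
from transformers import BlipProcessor

# Assumed checkpoint; substitute any BLIP checkpoint hosted on the Hub.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

image = Image.new("RGB", (224, 224), color="white")  # stand-in image
inputs = processor(images=image, text="a photo of", return_tensors="pt")

# token_type_ids is filtered out by model_input_names, per the class above.
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values']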
978
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip/processing_blip.py
transformers.models.blip.processing_blip.BlipProcessorKwargs
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack

class BlipProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        'text_kwargs': {
            'add_special_tokens': True,
            'padding': False,
            'stride': 0,
            'return_overflowing_tokens': False,
            'return_special_tokens_mask': False,
            'return_offsets_mapping': False,
            'return_token_type_ids': False,
            'return_length': False,
            'verbose': True,
        },
        'images_kwargs': {},
    }
class BlipProcessorKwargs(ProcessingKwargs, total=False):
    pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
15
0
15
2
14
0
2
2
1
0
3
0
0
979
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/configuration_blip_2.py
transformers.models.blip_2.configuration_blip_2.Blip2Config
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from typing import Optional

class Blip2Config(PretrainedConfig):
    """
    [`Blip2Config`] is the configuration class to store the configuration of a [`Blip2ForConditionalGeneration`]. It is
    used to instantiate a BLIP-2 model according to the specified arguments, defining the vision model, Q-Former model
    and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to
    that of the BLIP-2 [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Blip2VisionConfig`].
        qformer_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Blip2QFormerConfig`].
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize any [`PretrainedConfig`].
        num_query_tokens (`int`, *optional*, defaults to 32):
            The number of query tokens passed through the Transformer.
        image_text_hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden state of the image-text fusion layer.
        image_token_index (`int`, *optional*):
            Token index of special image token.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import (
    ...     Blip2VisionConfig,
    ...     Blip2QFormerConfig,
    ...     OPTConfig,
    ...     Blip2Config,
    ...     Blip2ForConditionalGeneration,
    ... )

    >>> # Initializing a Blip2Config with Salesforce/blip2-opt-2.7b style configuration
    >>> configuration = Blip2Config()

    >>> # Initializing a Blip2ForConditionalGeneration (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
    >>> model = Blip2ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Blip2Config from a Blip2VisionConfig, Blip2QFormerConfig and any PretrainedConfig

    >>> # Initializing BLIP-2 vision, BLIP-2 Q-Former and language model configurations
    >>> vision_config = Blip2VisionConfig()
    >>> qformer_config = Blip2QFormerConfig()
    >>> text_config = OPTConfig()

    >>> config = Blip2Config.from_text_vision_configs(vision_config, qformer_config, text_config)
    ```"""
    model_type = 'blip-2'
    attribute_map = {'image_token_id': 'image_token_index'}
    sub_configs = {'text_config': AutoConfig, 'qformer_config': Blip2QFormerConfig, 'vision_config': Blip2VisionConfig}

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, image_text_hidden_size=256, image_token_index=None, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config.get('model_type', 'opt')
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.num_query_tokens = num_query_tokens
        self.image_text_hidden_size = image_text_hidden_size
        self.image_token_index = image_token_index
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: Optional[PretrainedConfig]=None, **kwargs):
        """
        Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model
        configurations.

        Args:
            vision_config (`dict`):
                Dictionary of configuration options used to initialize [`Blip2VisionConfig`].
            qformer_config (`dict`):
                Dictionary of configuration options used to initialize [`Blip2QFormerConfig`].
            text_config (`dict`, *optional*):
                Dictionary of configuration options used to initialize any [`PretrainedConfig`].

        Returns:
            [`Blip2Config`]: An instance of a configuration object
        """
        return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict() if text_config is not None else None, **kwargs)
class Blip2Config(PretrainedConfig):
    '''
    [`Blip2Config`] is the configuration class to store the configuration of a [`Blip2ForConditionalGeneration`]. It is
    used to instantiate a BLIP-2 model according to the specified arguments, defining the vision model, Q-Former model
    and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to
    that of the BLIP-2 [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Blip2VisionConfig`].
        qformer_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Blip2QFormerConfig`].
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize any [`PretrainedConfig`].
        num_query_tokens (`int`, *optional*, defaults to 32):
            The number of query tokens passed through the Transformer.
        image_text_hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden state of the image-text fusion layer.
        image_token_index (`int`, *optional*):
            Token index of special image token.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import (
    ...     Blip2VisionConfig,
    ...     Blip2QFormerConfig,
    ...     OPTConfig,
    ...     Blip2Config,
    ...     Blip2ForConditionalGeneration,
    ... )

    >>> # Initializing a Blip2Config with Salesforce/blip2-opt-2.7b style configuration
    >>> configuration = Blip2Config()

    >>> # Initializing a Blip2ForConditionalGeneration (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
    >>> model = Blip2ForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Blip2Config from a Blip2VisionConfig, Blip2QFormerConfig and any PretrainedConfig

    >>> # Initializing BLIP-2 vision, BLIP-2 Q-Former and language model configurations
    >>> vision_config = Blip2VisionConfig()
    >>> qformer_config = Blip2QFormerConfig()
    >>> text_config = OPTConfig()

    >>> config = Blip2Config.from_text_vision_configs(vision_config, qformer_config, text_config)
    ```'''

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, image_text_hidden_size=256, image_token_index=None, **kwargs):
        pass

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: Optional[PretrainedConfig]=None, **kwargs):
        '''
        Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model
        configurations.

        Args:
            vision_config (`dict`):
                Dictionary of configuration options used to initialize [`Blip2VisionConfig`].
            qformer_config (`dict`):
                Dictionary of configuration options used to initialize [`Blip2QFormerConfig`].
            text_config (`dict`, *optional*):
                Dictionary of configuration options used to initialize any [`PretrainedConfig`].

        Returns:
            [`Blip2Config`]: An instance of a configuration object
        '''
        pass
4
2
33
4
22
7
4
1.19
1
3
2
0
1
9
2
2
127
22
48
31
29
57
27
15
24
5
1
1
7
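A short sketch of composing a `Blip2Config` from sub-configs via the `from_vision_qformer_text_configs` classmethod defined in the record above. It uses defaults only, so it runs without downloading any checkpoint:

from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

vision_config = Blip2VisionConfig()
qformer_config = Blip2QFormerConfig()
text_config = OPTConfig()

config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)

# __init__ wires the Q-Former's cross-attention width to the vision hidden size.
assert config.qformer_config.encoder_hidden_size == vision_config.hidden_size
print(config.use_decoder_only_language_model)  # True for a causal LM like OPT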
980
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/configuration_blip_2.py
transformers.models.blip_2.configuration_blip_2.Blip2QFormerConfig
from ...configuration_utils import PretrainedConfig

class Blip2QFormerConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Blip2QFormerModel`]. It is used to instantiate a
    BLIP-2 Querying Transformer (Q-Former) model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-2
    [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture. Configuration objects
    inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
    [`PretrainedConfig`] for more information.

    Note that [`Blip2QFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling the model.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Index to be used for padding token.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
        cross_attention_frequency (`int`, *optional*, defaults to 2):
            The frequency of adding cross-attention to the Transformer layers.
        encoder_hidden_size (`int`, *optional*, defaults to 1408):
            The hidden size of the hidden states for cross-attention.
        use_qformer_text_input (`bool`, *optional*, defaults to `False`):
            Whether to use BERT-style embeddings.

    Examples:

    ```python
    >>> from transformers import Blip2QFormerConfig, Blip2QFormerModel

    >>> # Initializing a BLIP-2 Salesforce/blip2-opt-2.7b style configuration
    >>> configuration = Blip2QFormerConfig()

    >>> # Initializing a model (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
    >>> model = Blip2QFormerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'blip_2_qformer'
    base_config_key = 'qformer_config'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, use_qformer_text_input=False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
        self.use_qformer_text_input = use_qformer_text_input
class Blip2QFormerConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`Blip2QFormerModel`]. It is used to instantiate a
    BLIP-2 Querying Transformer (Q-Former) model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-2
    [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture. Configuration objects
    inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
    [`PretrainedConfig`] for more information.

    Note that [`Blip2QFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling the model.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Index to be used for padding token.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
        cross_attention_frequency (`int`, *optional*, defaults to 2):
            The frequency of adding cross-attention to the Transformer layers.
        encoder_hidden_size (`int`, *optional*, defaults to 1408):
            The hidden size of the hidden states for cross-attention.
        use_qformer_text_input (`bool`, *optional*, defaults to `False`):
            Whether to use BERT-style embeddings.

    Examples:

    ```python
    >>> from transformers import Blip2QFormerConfig, Blip2QFormerModel

    >>> # Initializing a BLIP-2 Salesforce/blip2-opt-2.7b style configuration
    >>> configuration = Blip2QFormerConfig()

    >>> # Initializing a model (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
    >>> model = Blip2QFormerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, use_qformer_text_input=False, **kwargs):
        pass
2
1
37
1
36
0
1
1.44
1
1
0
0
1
15
1
1
104
9
39
38
18
56
20
19
18
1
1
0
1
981
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/configuration_blip_2.py
transformers.models.blip_2.configuration_blip_2.Blip2VisionConfig
from ...configuration_utils import PretrainedConfig

class Blip2VisionConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Blip2VisionModel`]. It is used to instantiate a
    BLIP-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the BLIP-2
    [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1408):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 39):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries and values in the self-attention layers.

    Example:

    ```python
    >>> from transformers import Blip2VisionConfig, Blip2VisionModel

    >>> # Initializing a Blip2VisionConfig with Salesforce/blip2-opt-2.7b style configuration
    >>> configuration = Blip2VisionConfig()

    >>> # Initializing a Blip2VisionModel (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
    >>> model = Blip2VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'blip_2_vision_model'
    base_config_key = 'vision_config'

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
class Blip2VisionConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`Blip2VisionModel`]. It is used to instantiate a
    BLIP-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the BLIP-2
    [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1408):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 39):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries and values in the self-attention layers.

    Example:

    ```python
    >>> from transformers import Blip2VisionConfig, Blip2VisionModel

    >>> # Initializing a Blip2VisionConfig with Salesforce/blip2-opt-2.7b style configuration
    >>> configuration = Blip2VisionConfig()

    >>> # Initializing a Blip2VisionModel (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
    >>> model = Blip2VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        pass
2
1
28
1
27
0
1
1.33
1
1
0
0
1
11
1
1
80
10
30
29
14
40
16
15
14
1
1
0
1
982
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2Attention
from typing import Any, Callable, Optional, Union
import torch
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel

class Blip2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
        self.scale = self.head_dim ** (-0.5)
        self.is_causal = False
        self.attention_dropout = config.attention_dropout
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
        if config.qkv_bias:
            q_bias = nn.Parameter(torch.zeros(self.embed_dim))
            v_bias = nn.Parameter(torch.zeros(self.embed_dim))
        else:
            q_bias = None
            v_bias = None
        if q_bias is not None:
            qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
            self.qkv.bias = nn.Parameter(qkv_bias)
        self.projection = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        bsz, tgt_len, embed_dim = hidden_states.size()
        mixed_qkv = self.qkv(hidden_states)
        mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4)
        query_states, key_states, value_states = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2])
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != 'eager':
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask=None, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scale, **kwargs)
        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.projection(attn_output)
        return (attn_output, attn_weights)
class Blip2Attention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, config):
        pass

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        pass

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        '''Input shape: Batch x Time x Channel'''
        pass
4
2
25
6
17
2
3
0.16
1
5
0
0
3
8
3
13
79
20
51
29
42
8
40
24
36
4
1
1
8
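The `__init__` above fuses q, k and v into one `nn.Linear` and builds its bias by concatenating a query bias, a zero-initialized key slot and a value bias. A standalone sketch of the same construction, with a made-up dimension for illustration:

import torch
from torch import nn

embed_dim = 8  # illustrative size only
qkv = nn.Linear(embed_dim, 3 * embed_dim, bias=False)

# Query and value biases are created explicitly; the key slot is filled with
# zeros before the three pieces are concatenated into one fused qkv bias.
q_bias = nn.Parameter(torch.zeros(embed_dim))
v_bias = nn.Parameter(torch.zeros(embed_dim))
qkv.bias = nn.Parameter(torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)))

print(qkv.bias.shape)  # torch.Size([24]) -> one bias vector covering q, k and v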
983
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2Encoder
from torch import nn
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithPast, Seq2SeqLMOutput
from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from typing import Any, Callable, Optional, Union

class Blip2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Blip2EncoderLayer`].

    Args:
        config (`Blip2Config`):
            The corresponding vision configuration for the `Blip2Encoder`.
    """

    def __init__(self, config: Blip2Config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    @auto_docstring
    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(hidden_states, attention_mask=attention_mask, **kwargs)
        return BaseModelOutput(last_hidden_state=hidden_states)
class Blip2Encoder(nn.Module):
    '''
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Blip2EncoderLayer`].

    Args:
        config (`Blip2Config`):
            The corresponding vision configuration for the `Blip2Encoder`.
    '''

    def __init__(self, config: Blip2Config):
        pass

    @auto_docstring
    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
        pass
4
1
37
4
24
9
7
0.52
1
9
3
0
2
3
2
12
84
11
48
18
38
25
27
11
24
12
1
2
13
984
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2EncoderLayer
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from torch import nn
from ...processing_utils import Unpack
from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
import torch
from ...modeling_layers import GradientCheckpointingLayer

class Blip2EncoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: Blip2Config):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = Blip2Attention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Blip2MLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, _ = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, **kwargs)
        hidden_states = hidden_states + residual
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = hidden_states + residual
        return hidden_states
class Blip2EncoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: Blip2Config):
        pass

    @auto_docstring
    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
        pass
4
0
22
3
15
5
2
0.33
1
6
3
0
2
5
2
12
46
6
30
16
22
10
21
11
18
2
1
1
3
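`Blip2EncoderLayer` above is a standard pre-norm transformer block: normalize, transform, add the residual, twice. A dependency-free sketch of the same control flow, using hypothetical `attn` and `mlp` stand-ins rather than the real sub-modules:

import torch
from torch import nn

dim = 16  # illustrative size
norm1, norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn = nn.Linear(dim, dim)  # stand-in for the self-attention sub-module
mlp = nn.Linear(dim, dim)   # stand-in for Blip2MLP

x = torch.randn(2, 4, dim)
x = x + attn(norm1(x))  # pre-norm attention sub-block with residual add
x = x + mlp(norm2(x))   # pre-norm MLP sub-block with residual add
print(x.shape)          # shape is preserved: torch.Size([2, 4, 16])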
985
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2ForConditionalGeneration
from torch import nn
from ...processing_utils import Unpack
from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
import torch
from ...generation import GenerationMixin
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
from typing import Any, Callable, Optional, Union
from torch.nn import CrossEntropyLoss

@auto_docstring(custom_intro='\n    BLIP-2 Model for generating text given an image and an optional text prompt. The model consists of a vision\n    encoder, Querying Transformer (Q-Former) and a language model.\n\n    One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue\n    the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.\n\n    <Tip>\n\n    Note that Flan-T5 checkpoints cannot be cast to float16. They are pre-trained using bfloat16.\n\n    </Tip>\n    ')
class Blip2ForConditionalGeneration(Blip2PreTrainedModel, GenerationMixin):
    config: Blip2Config
    main_input_name = 'pixel_values'
    _can_compile_fullgraph = True
    _keep_in_fp32_modules = ['query_tokens', 'qformer']
    _supports_flash_attn = False

    def __init__(self, config: Blip2Config):
        super().__init__(config)
        self.vision_model = Blip2VisionModel._from_config(config.vision_config)
        self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
        self.qformer = Blip2QFormerModel._from_config(config.qformer_config)
        self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
        if config.use_decoder_only_language_model:
            language_model = AutoModelForCausalLM.from_config(config.text_config)
        else:
            language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
        if language_model._tied_weights_keys is not None:
            self._tied_weights_keys = [f'language_model.{k}' for k in language_model._tied_weights_keys]
        self.language_model = language_model
        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.language_model.get_output_embeddings()

    def get_encoder(self):
        return self.language_model.get_encoder()

    def get_decoder(self):
        return self.language_model.get_decoder()

    def _tie_weights(self):
        if not self.config.use_decoder_only_language_model:
            self.language_model.encoder.embed_tokens = self.language_model.shared
            self.language_model.decoder.embed_tokens = self.language_model.shared

    def _preprocess_accelerate(self):
        """
        Some pre-processing hacks to make the model `accelerate` compatible. Check
        https://github.com/huggingface/transformers/pull/21707 for more details.
        """
        hf_device_map = self.hf_device_map
        if len(hf_device_map) > 1 and 'language_model' not in hf_device_map and (torch.cuda.device_count() > 1):
            logger.warning('The `language_model` is not in the `hf_device_map` dictionary and you are running your script in a multi-GPU environment. This may lead to unexpected behavior when using `accelerate`. Please pass a `device_map` that contains `language_model` to remove this warning. Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for more details on creating a `device_map` for large models.')
        if hasattr(self.language_model, '_hf_hook'):
            self.language_model._hf_hook.io_same_device = True

    def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
        """
        Encodes images into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                The tensors corresponding to the input images.
        """
        vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
        image_embeds = vision_outputs[0]
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)
        query_output = query_outputs[0]
        if query_output.dtype != image_embeds.dtype:
            query_output = query_output.to(image_embeds.dtype)
        language_model_inputs = self.language_projection(query_output)
        if return_dict:
            return (language_model_inputs, vision_outputs, query_outputs)
        return language_model_inputs

    def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
        """
        if input_ids is None:
            special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device))
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        return special_image_mask

    @can_return_tuple
    @auto_docstring
    def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Blip2ForConditionalGenerationModelOutput]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
            provided to serve as text prompt, which the language model can continue.

            Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default. Only relevant in case an encoder-decoder language model (like T5) is used.

        Examples:

        Prepare processor, model and image input

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import Blip2Processor, Blip2ForConditionalGeneration
        >>> import torch

        >>> device = "cuda" if torch.cuda.is_available() else "cpu"

        >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
        >>> model = Blip2ForConditionalGeneration.from_pretrained(
        ...     "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, dtype=torch.float16
        ... )  # doctest: +IGNORE_RESULT

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        ```

        Image captioning (without providing a text prompt):

        ```python
        >>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)

        >>> generated_ids = model.generate(**inputs)
        >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        >>> print(generated_text)
        two cats laying on a couch
        ```

        Visual question answering (prompt = question):

        ```python
        >>> prompt = "Question: how many cats are there? Answer:"
        >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.float16)

        >>> generated_ids = model.generate(**inputs)
        >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        >>> print(generated_text)
        two
        ```

        Note that int8 inference is also supported through [bitsandbytes](https://github.com/TimDettmers/bitsandbytes).
        This greatly reduces the amount of memory used by the model while maintaining the same performance.

        ```python
        >>> model = Blip2ForConditionalGeneration.from_pretrained(
        ...     "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, dtype=torch.bfloat16
        ... )  # doctest: +IGNORE_RESULT

        >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)

        >>> generated_ids = model.generate(**inputs)
        >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        >>> print(generated_text)
        two
        ```"""
        language_model_inputs, vision_outputs, query_outputs = self.get_image_features(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
        special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
        inputs_embeds = inputs_embeds.to(language_model_inputs.device).masked_scatter(special_image_mask, language_model_inputs)
        if self.config.use_decoder_only_language_model:
            outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
            logits = outputs[0]
            loss = None
            if labels is not None:
                labels = labels.to(logits.device)
                logits = logits[:, -labels.size(1):, :]
                shift_logits = logits[..., :-1, :].contiguous()
                shift_labels = labels[..., 1:].contiguous().to(logits.device)
                loss_fct = CrossEntropyLoss(reduction='mean')
                loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
        else:
            kwargs['return_dict'] = True
            outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=labels, **kwargs)
            loss = outputs.loss
            logits = outputs.logits
        return Blip2ForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs)

    @torch.no_grad()
    def generate(self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
        """
        Overrides `generate` function to be able to use the model as a conditional generator.

        Args:
            pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
                Input images to be processed.
            input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                The sequence used as a prompt for the generation.
            attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                Mask to avoid performing attention on padding token indices.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
                Whether to interpolate the positional encoding of the image embeddings.

        Returns:
            captions (list): A list of strings of length batch_size * num_captions.
        """
        if hasattr(self, 'hf_device_map'):
            self._preprocess_accelerate()
        batch_size = pixel_values.shape[0]
        image_embeds = self.vision_model(pixel_values, return_dict=True, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)
        query_output = query_outputs.last_hidden_state
        if query_output.dtype != image_embeds.dtype:
            query_output = query_output.to(image_embeds.dtype)
        language_model_inputs = self.language_projection(query_output)
        if inputs_embeds is None:
            if input_ids is None:
                image_tokens = [self.config.image_token_index] * self.config.num_query_tokens
                start_tokens = image_tokens + [self.config.text_config.bos_token_id]
                input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device)
                input_ids = input_ids.repeat(batch_size, 1)
            inputs_embeds = self.get_input_embeddings()(input_ids)
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if input_ids is None:
            special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device))
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
        inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
        inputs = {'inputs_embeds': inputs_embeds, 'attention_mask': attention_mask}
        if not self.language_model.config.is_encoder_decoder:
            inputs['input_ids'] = input_ids
        outputs = self.language_model.generate(**inputs, **generate_kwargs)
        return outputs
null
18
5
29
4
18
7
3
0.4
2
8
6
0
11
6
11
12
340
56
204
73
170
82
111
52
99
10
2
2
32
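The decoder-only loss branch in `forward` above shifts logits and labels by one position before the cross-entropy, so each position predicts the next token. A tiny self-contained sketch of that shift, with toy shapes chosen only for illustration:

import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11  # toy vocabulary
logits = torch.randn(2, 6, vocab_size)        # (batch, seq, vocab)
labels = torch.randint(0, vocab_size, (2, 6))  # (batch, seq)

# Predict token t+1 from position t: drop the last logit, drop the first label.
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()

loss = CrossEntropyLoss(reduction='mean')(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())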
986
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2ForConditionalGenerationModelOutput
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int

@dataclass
@auto_docstring(custom_intro='\n    Class defining the outputs of [`Blip2ForConditionalGeneration`].\n    ')
class Blip2ForConditionalGenerationModelOutput(ModelOutput):
    """
    loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Language modeling loss from the language model.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head of the language model.
    vision_outputs (`BaseModelOutputWithPooling`):
        Outputs of the vision encoder.
    qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
        Outputs of the Q-Former (Querying Transformer).
    language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
        Outputs of the language model.
    """
    loss: Optional[tuple[torch.FloatTensor]] = None
    logits: Optional[tuple[torch.FloatTensor]] = None
    vision_outputs: Optional[torch.FloatTensor] = None
    qformer_outputs: Optional[tuple[torch.FloatTensor]] = None
    language_model_outputs: Optional[tuple[torch.FloatTensor]] = None

    def to_tuple(self) -> tuple[Any]:
        return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass @auto_docstring(custom_intro='\n Class defining the outputs of [`Blip2ForConditionalGeneration`].\n ') class Blip2ForConditionalGenerationModelOutput(ModelOutput): ''' loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Language modeling loss from the language model. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head of the language model. vision_outputs (`BaseModelOutputWithPooling`): Outputs of the vision encoder. qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`): Outputs of the Q-Former (Querying Transformer). language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`): Outputs of the language model. ''' def to_tuple(self) -> tuple[Any]: pass
total_program_units: 4, total_doc_str: 1
AvgCountLine: 7, AvgCountLineBlank: 0, AvgCountLineCode: 7, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 1.08
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 0, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 30, CountLineBlank: 3, CountLineCode: 13, CountLineCodeDecl: 7, CountLineCodeExe: 11, CountLineComment: 14
CountStmt: 8, CountStmtDecl: 7, CountStmtExe: 6
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
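`to_tuple` in the output dataclass above flattens the record recursively: plain fields pass through, while the three nested sub-model outputs are flattened via their own `to_tuple`. A minimal sketch of that pattern with stand-in container classes (all names here are illustrative, not the real API):

```python
# Minimal sketch of the to_tuple pattern above: fields listed in NESTED hold
# other output objects and are flattened via their own to_tuple(); everything
# else is passed through unchanged.
class TinyOutput(dict):
    NESTED = ()

    def to_tuple(self):
        return tuple(
            self[k].to_tuple() if k in self.NESTED else self[k]
            for k in self
        )

class Inner(TinyOutput):
    pass

class Outer(TinyOutput):
    NESTED = ("vision_outputs",)

inner = Inner(last_hidden_state=[1, 2, 3])
outer = Outer(loss=0.5, vision_outputs=inner)
print(outer.to_tuple())  # (0.5, ([1, 2, 3],))
```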
987
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2ForImageTextRetrieval
from torch import nn from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig import torch from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from typing import Any, Callable, Optional, Union @auto_docstring(custom_intro='\n BLIP-2 Model with a vision and text projector, and a classification head on top. The model is used in the context\n of image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to\n the image.\n ') class Blip2ForImageTextRetrieval(Blip2PreTrainedModel): main_input_name = 'pixel_values' _keep_in_fp32_modules = ['query_tokens', 'qformer'] _supports_flash_attn = False def __init__(self, config: Blip2Config): super().__init__(config) self.vision_model = Blip2VisionModel._from_config(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.embeddings = Blip2TextEmbeddings(config.qformer_config) self.qformer = Blip2QFormerModel._from_config(config.qformer_config) self.vision_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size) self.text_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size) self.itm_head = nn.Linear(config.qformer_config.hidden_size, 2) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @auto_docstring def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, use_image_text_matching_head: Optional[bool]=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Blip2ImageTextMatchingModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be provided to serve as text prompt, which the language model can continue. Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details. [What are input IDs?](../glossary#input-ids) use_image_text_matching_head (`bool`, *optional*): Whether to return the Image-Text Matching or Contrastive scores. 
Examples: ```python >>> import torch >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Blip2ForImageTextRetrieval >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> model = Blip2ForImageTextRetrieval.from_pretrained("Salesforce/blip2-itm-vit-g", dtype=torch.float16) >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g") >>> model.to(device) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "two cats laying on a pink blanket" >>> inputs = processor(images=image, text=text, return_tensors="pt").to(device, torch.float16) >>> itm_out = model(**inputs, use_image_text_matching_head=True) >>> logits_per_image = torch.nn.functional.softmax(itm_out.logits_per_image, dim=1) >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities >>> print(f"{probs[0][0]:.1%} that image 0 is not '{text}'") 26.9% that image 0 is not 'two cats laying on a pink blanket' >>> print(f"{probs[0][1]:.1%} that image 0 is '{text}'") 73.0% that image 0 is 'two cats laying on a pink blanket' >>> texts = ["a photo of a cat", "a photo of a dog"] >>> inputs = processor(images=image, text=texts, return_tensors="pt").to(device, torch.float16) >>> itc_out = model(**inputs, use_image_text_matching_head=False) >>> logits_per_image = itc_out.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'") 55.3% that image 0 is 'a photo of a cat' >>> print(f"{probs[0][1]:.1%} that image 0 is '{texts[1]}'") 44.7% that image 0 is 'a photo of a dog' ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) if use_image_text_matching_head: query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) if self.config.image_token_index is not None: input_ids = input_ids[:, self.config.num_query_tokens:] else: query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=query_tokens.device) attention_mask = torch.cat([query_attention_mask, attention_mask], dim=1) query_embeds = self.embeddings(input_ids=input_ids, query_embeds=query_tokens) text_outputs = self.qformer(query_embeds=query_embeds, query_length=query_tokens.shape[1], attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=return_dict) text_embeds = text_outputs[0] if not return_dict else text_outputs.last_hidden_state text_embeds = text_embeds.to(dtype=self.itm_head.weight.dtype) output = self.itm_head(text_embeds[:, :query_tokens.size(1), :]) logits_per_image = output.mean(dim=1) logits_per_text = logits_per_image.t() else: query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, 
encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=return_dict) image_embeds = query_outputs[0] if not return_dict else query_outputs.last_hidden_state image_embeds = image_embeds.to(dtype=self.vision_projection.weight.dtype) if self.config.image_token_index is not None: input_ids = input_ids[:, self.config.num_query_tokens:] attention_mask = attention_mask[:, self.config.num_query_tokens:] query_embeds = self.embeddings(input_ids=input_ids) text_outputs = self.qformer(query_embeds=query_embeds, query_length=0, attention_mask=attention_mask, return_dict=return_dict) question_embeds = text_outputs[0] if not return_dict else text_outputs.last_hidden_state question_embeds = question_embeds.to(dtype=self.text_projection.weight.dtype) image_embeds = nn.functional.normalize(self.vision_projection(image_embeds), dim=-1) text_embeds = nn.functional.normalize(self.text_projection(question_embeds[:, 0, :]), dim=-1) logits_per_image = torch.matmul(image_embeds, text_embeds.t()) logits_per_image, _ = logits_per_image.max(dim=1) logits_per_text = logits_per_image.t() if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return output return Blip2ImageTextMatchingModelOutput(logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
@auto_docstring(custom_intro='\n BLIP-2 Model with a vision and text projector, and a classification head on top. The model is used in the context\n of image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to\n the image.\n ') class Blip2ForImageTextRetrieval(Blip2PreTrainedModel): def __init__(self, config: Blip2Config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass @auto_docstring def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, use_image_text_matching_head: Optional[bool]=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Blip2ImageTextMatchingModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be provided to serve as text prompt, which the language model can continue. Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details. [What are input IDs?](../glossary#input-ids) use_image_text_matching_head (`bool`, *optional*): Whether to return the Image-Text Matching or Contrastive scores. Examples: ```python >>> import torch >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Blip2ForImageTextRetrieval >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> model = Blip2ForImageTextRetrieval.from_pretrained("Salesforce/blip2-itm-vit-g", dtype=torch.float16) >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g") >>> model.to(device) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "two cats laying on a pink blanket" >>> inputs = processor(images=image, text=text, return_tensors="pt").to(device, torch.float16) >>> itm_out = model(**inputs, use_image_text_matching_head=True) >>> logits_per_image = torch.nn.functional.softmax(itm_out.logits_per_image, dim=1) >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities >>> print(f"{probs[0][0]:.1%} that image 0 is not '{text}'") 26.9% that image 0 is not 'two cats laying on a pink blanket' >>> print(f"{probs[0][1]:.1%} that image 0 is '{text}'") 73.0% that image 0 is 'two cats laying on a pink blanket' >>> texts = ["a photo of a cat", "a photo of a dog"] >>> inputs = processor(images=image, text=texts, return_tensors="pt").to(device, torch.float16) >>> itc_out = model(**inputs, use_image_text_matching_head=False) >>> logits_per_image = itc_out.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'") 55.3% that image 0 is 'a photo of a cat' >>> print(f"{probs[0][1]:.1%} that image 0 is '{texts[1]}'") 44.7% that image 0 is 'a photo of a dog' ``` ''' pass
total_program_units: 7, total_doc_str: 1
AvgCountLine: 41, AvgCountLineBlank: 8, AvgCountLineCode: 23, AvgCountLineComment: 10, AvgCyclomatic: 3, CommentToCodeRatio: 0.41
CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 5, CountClassDerived: 0
CountDeclInstanceMethod: 4, CountDeclInstanceVariable: 7, CountDeclMethod: 4, CountDeclMethodAll: 5
CountLine: 173, CountLineBlank: 36, CountLineCode: 97, CountLineCodeDecl: 38, CountLineCodeExe: 81, CountLineComment: 40
CountStmt: 49, CountStmtDecl: 28, CountStmtExe: 44
MaxCyclomatic: 9, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 12
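In the contrastive (ITC) branch of the forward above, projected image and text embeddings are L2-normalized, compared all-pairs, and max-pooled over the query tokens. A sketch of that scoring with random tensors (all shapes are assumed for illustration):

```python
# Hedged sketch of the contrastive (ITC) scoring in the forward above:
# normalize projected embeddings, take all-pairs dot products, then keep the
# best-matching query token per (image, text) pair.
import torch
from torch import nn

num_images, num_texts, num_query_tokens, dim = 2, 3, 32, 256
image_embeds = torch.randn(num_images, num_query_tokens, dim)  # per-query image features
text_embeds = torch.randn(num_texts, dim)                      # [CLS]-style text features

image_embeds = nn.functional.normalize(image_embeds, dim=-1)
text_embeds = nn.functional.normalize(text_embeds, dim=-1)

# (num_images, num_query_tokens, num_texts): similarity of every query token
# to every text; max over dim=1 pools across the query tokens.
logits = torch.matmul(image_embeds, text_embeds.t())
logits_per_image, _ = logits.max(dim=1)
print(logits_per_image.shape)  # torch.Size([2, 3])
```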
988
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2ImageTextMatchingModelOutput
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union

import torch

from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithPast, Seq2SeqLMOutput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int


@dataclass
@auto_docstring
class Blip2ImageTextMatchingModelOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output.
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output.
    text_model_output (`BaseModelOutputWithPooling`):
        The output of the [`Blip2QFormerModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`Blip2VisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple(
            self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
@dataclass @auto_docstring class Blip2ImageTextMatchingModelOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output. text_model_output (`BaseModelOutputWithPooling`): The output of the [`Blip2QFormerModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`Blip2VisionModel`]. ''' def to_tuple(self) -> tuple[Any]: pass
total_program_units: 4, total_doc_str: 1
AvgCountLine: 5, AvgCountLineBlank: 0, AvgCountLineCode: 5, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 1.46
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 0, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 34, CountLineBlank: 2, CountLineCode: 13, CountLineCodeDecl: 9, CountLineCodeExe: 11, CountLineComment: 19
CountStmt: 10, CountStmtDecl: 9, CountStmtExe: 8
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
989
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2MLP
from ...activations import ACT2FN

import torch
from torch import nn


class Blip2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
class Blip2MLP(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 6, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 13, CountLineBlank: 1, CountLineCode: 12, CountLineCodeDecl: 7, CountLineCodeExe: 9, CountLineComment: 0
CountStmt: 12, CountStmtDecl: 7, CountStmtExe: 9
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
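`Blip2MLP` above is the standard two-layer transformer feed-forward: expand to `intermediate_size`, apply the configured activation, project back to `hidden_size`. A quick shape check under assumed sizes (a fixed GELU stands in for `ACT2FN[config.hidden_act]`):

```python
# Shape sanity check for the fc1 -> activation -> fc2 feed-forward above.
# hidden_size/intermediate_size values are illustrative assumptions.
import torch
from torch import nn

hidden_size, intermediate_size = 768, 3072
fc1 = nn.Linear(hidden_size, intermediate_size)
fc2 = nn.Linear(intermediate_size, hidden_size)
act = nn.GELU()  # stands in for ACT2FN[config.hidden_act]

x = torch.randn(2, 10, hidden_size)  # (batch, seq_len, hidden)
out = fc2(act(fc1(x)))
print(out.shape)                     # torch.Size([2, 10, 768])
```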
990
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2Model
from torch import nn import warnings from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithPast, Seq2SeqLMOutput from ...processing_utils import Unpack from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig import torch from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from typing import Any, Callable, Optional, Union from torch.nn import CrossEntropyLoss @auto_docstring(custom_intro='\n BLIP-2 Model for generating text and image features. The model consists of a vision encoder, Querying Transformer\n (Q-Former) and a language model.\n ') class Blip2Model(Blip2PreTrainedModel): config: Blip2Config main_input_name = 'pixel_values' _keep_in_fp32_modules = ['query_tokens', 'qformer'] _supports_flash_attn = False def __init__(self, config: Blip2Config): super().__init__(config) self.vision_model = Blip2VisionModel._from_config(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = Blip2QFormerModel._from_config(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config) if language_model._tied_weights_keys is not None: self._tied_weights_keys = [f'language_model.{k}' for k in language_model._tied_weights_keys] self.language_model = language_model self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self): return self.language_model.get_encoder() def get_decoder(self): return self.language_model.get_decoder() def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared @filter_out_non_signature_kwargs() @auto_docstring def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, legacy_output: bool=True) -> Union[torch.FloatTensor, CausalLMOutputWithPast]: """ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). 
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. legacy_output (`bool`, *optional*, defaults to `True`): Whether to return a model output object or a tensor of features. Returns: text_outputs (`CausalLMOutputWithPast` or `torch.FloatTensor`): The language model outputs. If `legacy_output=False`, the output is a `torch.FloatTensor`. Examples: ```python >>> import torch >>> from transformers import AutoTokenizer, Blip2Model >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-opt-2.7b") >>> inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt") >>> with torch.inference_mode(): ... text_features = model.get_text_features(**inputs) ```""" if legacy_output: warnings.warn('Deprecation notice: In Transformers v4.59, the default return value of `get_text_features` will change. Currently, this method returns a model output object, but starting in v4.59, it will return a tensor instead. To opt in to the new behavior now, set `legacy_output=False`.') if self.config.use_decoder_only_language_model: text_outputs: CausalLMOutputWithPast = self.language_model(input_ids=input_ids, attention_mask=attention_mask, return_dict=True) else: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) text_outputs: Seq2SeqLMOutput = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=labels, return_dict=True) return text_outputs if legacy_output else text_outputs.logits @filter_out_non_signature_kwargs() @auto_docstring def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False, legacy_output: bool=True) -> Union[torch.FloatTensor, CausalLMOutputWithPast]: """ legacy_output (`bool`, *optional*, defaults to `True`): Whether to return a model output object or a tensor of features. Returns: vision_outputs (`BaseModelOutputWithPooling` or `torch.FloatTensor`): The vision model outputs. If `legacy_output=False`, the output is a `torch.FloatTensor`. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, Blip2Model >>> from transformers.image_utils import load_image >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... image_outputs = model.get_image_features(**inputs) ```""" if legacy_output: warnings.warn('Deprecation notice: In Transformers v4.59, the default return value of `get_text_features` will change. Currently, this method returns a model output object, but starting in v4.59, it will return a tensor instead. 
To opt in to the new behavior now, set `legacy_output=False`.') vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True) return vision_outputs if legacy_output else vision_outputs.pooler_output @filter_out_non_signature_kwargs() @auto_docstring def get_qformer_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False, legacy_output: bool=True) -> Union[torch.FloatTensor, BaseModelOutputWithPooling]: """ legacy_output (`bool`, *optional*, defaults to `True`): Whether to return a model output object or a tensor of features. Returns: qformer_outputs (`BaseModelOutputWithPooling` or `torch.FloatTensor`): The Q-Former outputs. If `legacy_output=False`, the output is a `torch.FloatTensor`. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, Blip2Model >>> from transformers.image_utils import load_image >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... qformer_outputs = model.get_qformer_features(**inputs) ```""" if legacy_output: warnings.warn('Deprecation notice: In Transformers v4.59, the default return value of `get_qformer_features` will change. Currently, this method returns a model output object, but starting in v4.59, it will return a tensor instead. To opt in to the new behavior now, set `legacy_output=False`.') vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True) image_embeds = vision_outputs.last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True) return query_outputs if legacy_output else query_outputs.last_hidden_state def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor): """ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`. """ if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)) special_image_mask = special_image_mask.all(-1) else: special_image_mask = input_ids == self.config.image_token_id special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) return special_image_mask @can_return_tuple @auto_docstring def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Blip2ForConditionalGenerationModelOutput]: """ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also be used by default. Only relevant in case an encoder-decoder language model (like T5) is used. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import Blip2Processor, Blip2Model >>> import torch >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16) >>> model.to(device) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> prompt = "Question: how many cats are there? Answer:" >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16) >>> outputs = model(**inputs) ```""" vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, **kwargs) query_output = query_outputs[0] if query_output.dtype != image_embeds.dtype: query_output = query_output.to(image_embeds.dtype) language_model_inputs = self.language_projection(query_output) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.to(language_model_inputs.device).masked_scatter(special_image_mask, language_model_inputs) if self.config.use_decoder_only_language_model: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs) logits = outputs[0] loss = None if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1):, :] shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) loss_fct = CrossEntropyLoss(reduction='mean') loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=labels, return_dict=True, **kwargs) loss = outputs.loss logits = outputs.logits return Blip2ForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs)
@auto_docstring(custom_intro='\n BLIP-2 Model for generating text and image features. The model consists of a vision encoder, Querying Transformer\n (Q-Former) and a language model.\n ') class Blip2Model(Blip2PreTrainedModel): def __init__(self, config: Blip2Config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_output_embeddings(self, new_embeddings): pass def get_output_embeddings(self) -> nn.Module: pass def get_encoder(self): pass def get_decoder(self): pass def _tie_weights(self): pass @filter_out_non_signature_kwargs() @auto_docstring def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, legacy_output: bool=True) -> Union[torch.FloatTensor, CausalLMOutputWithPast]: ''' decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. legacy_output (`bool`, *optional*, defaults to `True`): Whether to return a model output object or a tensor of features. Returns: text_outputs (`CausalLMOutputWithPast` or `torch.FloatTensor`): The language model outputs. If `legacy_output=False`, the output is a `torch.FloatTensor`. Examples: ```python >>> import torch >>> from transformers import AutoTokenizer, Blip2Model >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-opt-2.7b") >>> inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt") >>> with torch.inference_mode(): ... text_features = model.get_text_features(**inputs) ```''' pass @filter_out_non_signature_kwargs() @auto_docstring def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False, legacy_output: bool=True) -> Union[torch.FloatTensor, CausalLMOutputWithPast]: ''' legacy_output (`bool`, *optional*, defaults to `True`): Whether to return a model output object or a tensor of features. Returns: vision_outputs (`BaseModelOutputWithPooling` or `torch.FloatTensor`): The vision model outputs. If `legacy_output=False`, the output is a `torch.FloatTensor`. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, Blip2Model >>> from transformers.image_utils import load_image >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... 
image_outputs = model.get_image_features(**inputs) ```''' pass @filter_out_non_signature_kwargs() @auto_docstring def get_qformer_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False, legacy_output: bool=True) -> Union[torch.FloatTensor, BaseModelOutputWithPooling]: ''' legacy_output (`bool`, *optional*, defaults to `True`): Whether to return a model output object or a tensor of features. Returns: qformer_outputs (`BaseModelOutputWithPooling` or `torch.FloatTensor`): The Q-Former outputs. If `legacy_output=False`, the output is a `torch.FloatTensor`. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, Blip2Model >>> from transformers.image_utils import load_image >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... qformer_outputs = model.get_qformer_features(**inputs) ```''' pass def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor): ''' Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`. ''' pass @can_return_tuple @auto_docstring def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Blip2ForConditionalGenerationModelOutput]: ''' decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. Only relevant in case an encoder-decoder language model (like T5) is used. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import Blip2Processor, Blip2Model >>> import torch >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16) >>> model.to(device) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> prompt = "Question: how many cats are there? Answer:" >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16) >>> outputs = model(**inputs) ```''' pass
total_program_units: 23, total_doc_str: 5
AvgCountLine: 27, AvgCountLineBlank: 3, AvgCountLineCode: 17, AvgCountLineComment: 7, AvgCyclomatic: 3, CommentToCodeRatio: 0.39
CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 6, CountClassDerived: 0
CountDeclInstanceMethod: 12, CountDeclInstanceVariable: 6, CountDeclMethod: 12, CountDeclMethodAll: 13
CountLine: 344, CountLineBlank: 51, CountLineCode: 212, CountLineCodeDecl: 87, CountLineCodeExe: 158, CountLineComment: 82
CountStmt: 92, CountStmtDecl: 47, CountStmtExe: 79
MaxCyclomatic: 9, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 33
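For the decoder-only branch, `Blip2Model.forward` above truncates the logits to the label length and computes a one-position-shifted cross-entropy. A worked sketch of that shift-and-flatten loss with toy shapes:

```python
# Sketch of the shifted cross-entropy used in the decoder-only branch above:
# predict token t+1 from position t, so drop the last logit and the first label.
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, vocab_size = 2, 6, 11
logits = torch.randn(batch, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch, seq_len))

shift_logits = logits[..., :-1, :].contiguous()  # positions 0..T-2
shift_labels = labels[..., 1:].contiguous()      # positions 1..T-1
loss = CrossEntropyLoss(reduction="mean")(
    shift_logits.view(-1, vocab_size), shift_labels.view(-1)
)
print(loss.item())
```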
991
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2PreTrainedModel
from torch import nn

from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig


@auto_docstring
class Blip2PreTrainedModel(PreTrainedModel):
    config: Blip2Config
    base_model_prefix = 'blip'
    supports_gradient_checkpointing = True
    _supports_attention_backend = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _no_split_modules = ['Blip2Attention', 'Blip2QFormerMultiHeadAttention', 'Blip2EncoderLayer', 'Blip2TextEmbeddings', 'T5Block', 'OPTDecoderLayer']
    _skip_keys_device_placement = 'past_key_values'

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=factor)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=factor)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, Blip2VisionEmbeddings):
            nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
            nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
        elif isinstance(module, (Blip2Model, Blip2TextModelWithProjection, Blip2VisionModelWithProjection, Blip2ForConditionalGeneration, Blip2ForImageTextRetrieval)):
            module.query_tokens.data.zero_()
@auto_docstring class Blip2PreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 19, AvgCountLineBlank: 2, AvgCountLineCode: 16, AvgCountLineComment: 1, AvgCyclomatic: 7, CommentToCodeRatio: 0.17
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 2, CountClassDerived: 7
CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 0, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 39, CountLineBlank: 5, CountLineCode: 29, CountLineCodeDecl: 9, CountLineCodeExe: 27, CountLineComment: 5
CountStmt: 21, CountStmtDecl: 9, CountStmtExe: 19
MaxCyclomatic: 7, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 7
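`_init_weights` above dispatches on module type: normal initialization for linear/conv/embedding weights, zeroed biases and unit scales for LayerNorm, truncated normal for the vision embeddings, and zeroed query tokens. A reduced sketch of the dispatch pattern applied with `nn.Module.apply` (the `std` default stands in for `config.initializer_range`):

```python
# Reduced sketch of the type-dispatched init above: normal init for
# Linear/Conv2d weights, zero bias, ones/zeros for LayerNorm.
import torch
from torch import nn

def init_weights(module, std=0.02):
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

model = nn.Sequential(nn.Linear(4, 8), nn.LayerNorm(8))
model.apply(init_weights)  # nn.Module.apply walks every submodule
print(model[1].weight)     # LayerNorm scale is all ones after init
```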
992
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2QFormerAttention
from typing import Any, Callable, Optional, Union

import torch
from torch import nn

from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int


class Blip2QFormerAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.attention = Blip2QFormerMultiHeadAttention(config, is_cross_attention)
        self.output = Blip2QFormerSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
        attn_output, _ = self.attention(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
        attention_output = self.output(attn_output, hidden_states)
        return attention_output
class Blip2QFormerAttention(nn.Module): def __init__(self, config, is_cross_attention=False): pass def prune_heads(self, heads): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor: pass
total_program_units: 4, total_doc_str: 0
AvgCountLine: 15, AvgCountLineBlank: 1, AvgCountLineCode: 13, AvgCountLineComment: 1, AvgCyclomatic: 1, CommentToCodeRatio: 0.07
CountClassBase: 1, CountClassCoupled: 6, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 3, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 47, CountLineBlank: 4, CountLineCode: 41, CountLineCodeDecl: 20, CountLineCodeExe: 28, CountLineComment: 3
CountStmt: 22, CountStmtDecl: 11, CountStmtExe: 18
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 4
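`prune_heads` above drops whole attention heads by slicing the query/key/value projections row-wise (and the output projection column-wise) at the surviving indices. The real slicing lives in `transformers.pytorch_utils.prune_linear_layer`; the following is a simplified pure-PyTorch stand-in for the row-wise case, not the library implementation:

```python
# Hedged sketch of row-wise linear-layer pruning as used by prune_heads above.
# prune_rows is a simplified stand-in for prune_linear_layer, not the real API.
import torch
from torch import nn

def prune_rows(layer: nn.Linear, keep: torch.Tensor) -> nn.Linear:
    new = nn.Linear(layer.in_features, keep.numel(), bias=layer.bias is not None)
    new.weight.data = layer.weight.data.index_select(0, keep).clone()
    if layer.bias is not None:
        new.bias.data = layer.bias.data.index_select(0, keep).clone()
    return new

# Keep 3 of 4 heads of size 2 -> keep the output dims of heads {0, 2, 3}.
query = nn.Linear(8, 8)
keep_index = torch.tensor([0, 1, 4, 5, 6, 7])
query = prune_rows(query, keep_index)
print(query.weight.shape)  # torch.Size([6, 8])
```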
993
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2QFormerEncoder
from torch import nn

from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithPast, Seq2SeqLMOutput
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int


class Blip2QFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, query_length=query_length, **kwargs)
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states)
class Blip2QFormerEncoder(nn.Module): def __init__(self, config): pass @can_return_tuple def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]): pass
total_program_units: 4, total_doc_str: 0
AvgCountLine: 46, AvgCountLineBlank: 4, AvgCountLineCode: 42, AvgCountLineComment: 0, AvgCyclomatic: 9, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 93, CountLineBlank: 8, CountLineCode: 85, CountLineCodeDecl: 28, CountLineCodeExe: 69, CountLineComment: 0
CountStmt: 35, CountStmtDecl: 15, CountStmtExe: 32
MaxCyclomatic: 16, MaxInheritanceTree: 1, MaxNesting: 3, SumCyclomatic: 17
994
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2QFormerIntermediate
from ...activations import ACT2FN

import torch
from torch import nn


class Blip2QFormerIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class Blip2QFormerIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 6, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 13, CountLineBlank: 1, CountLineCode: 12, CountLineCodeDecl: 5, CountLineCodeExe: 9, CountLineComment: 0
CountStmt: 11, CountStmtDecl: 5, CountStmtExe: 8
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
995
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2QFormerLayer
import torch

from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int


class Blip2QFormerLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Blip2QFormerAttention(config)
        self.layer_idx = layer_idx
        if layer_idx % config.cross_attention_frequency == 0:
            self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True)
            self.has_cross_attention = True
        else:
            self.has_cross_attention = False
        if config.use_qformer_text_input:
            self.intermediate = Blip2QFormerIntermediate(config)
            self.output = Blip2QFormerOutput(config)
        self.intermediate_query = Blip2QFormerIntermediate(config)
        self.output_query = Blip2QFormerOutput(config)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
        attention_output = self.attention(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, **kwargs)
        if query_length > 0:
            query_attention_output = attention_output[:, :query_length, :]
            if self.has_cross_attention:
                if encoder_hidden_states is None:
                    raise ValueError('encoder_hidden_states must be given for cross-attention layers')
                query_attention_output = self.crossattention(hidden_states=query_attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
            layer_output = apply_chunking_to_forward(self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output)
            if attention_output.shape[1] > query_length:
                layer_output_text = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :])
                layer_output = torch.cat([layer_output, layer_output_text], dim=1)
        else:
            layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
        return layer_output

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

    def feed_forward_chunk_query(self, attention_output):
        intermediate_output = self.intermediate_query(attention_output)
        layer_output = self.output_query(intermediate_output, attention_output)
        return layer_output
class Blip2QFormerLayer(GradientCheckpointingLayer): def __init__(self, config, layer_idx): pass def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]): pass def feed_forward_chunk(self, attention_output): pass def feed_forward_chunk_query(self, attention_output): pass
total_program_units: 5, total_doc_str: 0
AvgCountLine: 25, AvgCountLineBlank: 3, AvgCountLineCode: 21, AvgCountLineComment: 1, AvgCyclomatic: 3, CommentToCodeRatio: 0.02
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 4, CountDeclInstanceVariable: 10, CountDeclMethod: 4, CountDeclMethodAll: 14
CountLine: 102, CountLineBlank: 14, CountLineCode: 86, CountLineCodeDecl: 38, CountLineCodeExe: 71, CountLineComment: 2
CountStmt: 46, CountStmtDecl: 28, CountStmtExe: 41
MaxCyclomatic: 6, MaxInheritanceTree: 1, MaxNesting: 3, SumCyclomatic: 11
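The layer above routes its feed-forward through `apply_chunking_to_forward`, which trades peak memory for extra calls by splitting the sequence dimension into chunks. A sketch of the idea, equivalent (up to floating point) to a single full pass because the feed-forward acts position-wise:

```python
# Sketch of feed-forward chunking as used in the layer above: split along the
# sequence dimension, run the same function per chunk, concatenate results.
import torch
from torch import nn

def chunked_forward(fn, x, chunk_size, seq_dim=1):
    if chunk_size == 0:  # 0 disables chunking
        return fn(x)
    chunks = x.split(chunk_size, dim=seq_dim)
    return torch.cat([fn(c) for c in chunks], dim=seq_dim)

ff = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
x = torch.randn(2, 10, 16)
full = ff(x)
chunked = chunked_forward(ff, x, chunk_size=4)
print(torch.allclose(full, chunked, atol=1e-6))  # True
```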
996
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2QFormerModel
from torch import nn from ...processing_utils import Unpack from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithPast, Seq2SeqLMOutput from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig import torch from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from typing import Any, Callable, Optional, Union from ...utils.generic import OutputRecorder, check_model_inputs @auto_docstring(custom_intro='\n BLIP-2 Querying Transformer (Q-Former).\n ') class Blip2QFormerModel(Blip2PreTrainedModel): _supports_attention_backend = False _supports_flash_attn = False _supports_sdpa = False _supports_flex_attn = False _can_record_outputs = {'hidden_states': Blip2QFormerLayer, 'attentions': [OutputRecorder(Blip2QFormerMultiHeadAttention, index=1, layer_name='.attention')], 'cross_attentions': [OutputRecorder(Blip2QFormerMultiHeadAttention, index=1, layer_name='.crossattention')]} def __init__(self, config: Blip2QFormerConfig): super().__init__(config) self.config = config self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.encoder = Blip2QFormerEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`tuple[int]`): The shape of the input to the model. device (`torch.device`): The device of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})') extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask @check_model_inputs @auto_docstring def forward(self, query_embeds: torch.FloatTensor, query_length: Optional[int]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: """ query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Hidden states to be used in the attention computation. 
If cross-attention, will be used for the query (i.e., key and value will use the encoder_hidden_states). query_length (`int`, *optional*): Length of the query, usually based on the number of query tokens. If no value is provided, query_length will be inferred by the query_embeds. """ query_length = query_length if query_length is not None else query_embeds.shape[1] if query_embeds is not None else 0 query_embeds = query_embeds.to(self.layernorm.weight.dtype) embedding_output = self.layernorm(query_embeds) embedding_output = self.dropout(embedding_output) input_shape = embedding_output.size()[:-1] batch_size, seq_length = input_shape device = embedding_output.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) if encoder_hidden_states is not None: if encoder_hidden_states.dtype != query_embeds.dtype: encoder_hidden_states = encoder_hidden_states.to(query_embeds.dtype) if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs: BaseModelOutput = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, query_length=query_length, **kwargs) sequence_output = encoder_outputs.last_hidden_state pooled_output = sequence_output[:, 0, :] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output)
@auto_docstring(custom_intro='\n BLIP-2 Querying Transformer (Q-Former).\n ') class Blip2QFormerModel(Blip2PreTrainedModel): def __init__(self, config: Blip2QFormerConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor: ''' Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`tuple[int]`): The shape of the input to the model. device (`torch.device`): The device of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. ''' pass @check_model_inputs @auto_docstring def forward(self, query_embeds: torch.FloatTensor, query_length: Optional[int]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: ''' query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Hidden states to be used in the attention computation. If cross-attention, will be used for the query (i.e., key and value will use the encoder_hidden_states). query_length (`int`, *optional*): Length of the query, usually based on the number of query tokens. If no value is provided, query_length will be inferred by the query_embeds. ''' pass
total_program_units: 10, total_doc_str: 3
AvgCountLine: 30, AvgCountLineBlank: 3, AvgCountLineCode: 18, AvgCountLineComment: 9, AvgCyclomatic: 4, CommentToCodeRatio: 0.53
CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 4, CountDeclMethod: 6, CountDeclMethodAll: 7
CountLine: 190, CountLineBlank: 24, CountLineCode: 109, CountLineCodeDecl: 44, CountLineCodeExe: 83, CountLineComment: 58
CountStmt: 55, CountStmtDecl: 25, CountStmtExe: 48
MaxCyclomatic: 13, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 21
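`get_extended_attention_mask` above broadcasts a `(batch, seq)` padding mask to `(batch, 1, 1, seq)` and converts ones/zeros into additive scores that are summed onto the attention logits: 0.0 for positions to attend, -10000.0 for masked ones. A small numeric sketch:

```python
# Numeric sketch of the mask conversion above: 1 -> 0.0 (attend),
# 0 -> -10000.0 (effectively -inf after softmax).
import torch

attention_mask = torch.tensor([[1, 1, 0]])           # last token is padding
extended = attention_mask[:, None, None, :].float()  # (1, 1, 1, 3)
extended = (1.0 - extended) * -10000.0

scores = torch.zeros(1, 1, 3, 3) + extended          # broadcast onto logits
probs = scores.softmax(dim=-1)
print(probs[0, 0, 0])  # masked key receives ~0 attention weight
```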
997
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2QFormerMultiHeadAttention
from torch import nn
from ...processing_utils import Unpack
import torch
import math
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int

class Blip2QFormerMultiHeadAttention(nn.Module):

    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
            self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        mixed_query_layer = self.query(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
            if self.position_embedding_type == 'relative_key':
                relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == 'relative_key_query':
                relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)
        attention_probs_dropped = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask
        context_layer = torch.matmul(attention_probs_dropped, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return (context_layer, attention_probs)
class Blip2QFormerMultiHeadAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        pass

    def save_attn_gradients(self, attn_gradients):
        pass

    def get_attn_gradients(self):
        pass

    def save_attention_map(self, attention_map):
        pass

    def get_attention_map(self):
        pass

    def transpose_for_scores(self, x):
        pass

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
        pass
8
0
18
3
13
1
3
0.11
1
3
0
0
7
14
7
17
130
26
95
51
78
10
79
42
71
10
1
2
19
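A minimal usage sketch for the attention module above. The SimpleNamespace config is a stand-in for the real Blip2QFormerConfig: its field names are the ones the module reads, but all dimension values (hidden size 768, 12 heads, 1408-dim vision features, 32 query tokens, 257 image patches) are illustrative assumptions, not the model's actual hyperparameters.

import torch
from types import SimpleNamespace

# Hypothetical config exposing only the attributes the module accesses.
config = SimpleNamespace(
    hidden_size=768,
    num_attention_heads=12,
    encoder_hidden_size=1408,
    attention_probs_dropout_prob=0.0,
    position_embedding_type='absolute',
)

# Cross-attention: queries attend over encoder (vision) features.
attn = Blip2QFormerMultiHeadAttention(config, is_cross_attention=True)
queries = torch.randn(2, 32, 768)        # learned query tokens
image_feats = torch.randn(2, 257, 1408)  # vision encoder output

context, probs = attn(queries, encoder_hidden_states=image_feats)
print(context.shape)  # torch.Size([2, 32, 768])  — per-query context vectors
print(probs.shape)    # torch.Size([2, 12, 32, 257]) — one map per head

Note how the key/value projections map from encoder_hidden_size (1408) down to the Q-Former's hidden size, which is what lets the queries attend over features from a differently sized vision backbone.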
998
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2QFormerOutput
import torch
from torch import nn

class Blip2QFormerOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class Blip2QFormerOutput(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
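A short sketch of the projection-plus-residual pattern in Blip2QFormerOutput: project the FFN output back to hidden size, apply dropout, add the residual, then LayerNorm. The config fields match what the module reads; the sizes (3072 intermediate, 768 hidden) are assumed for illustration.

import torch
from types import SimpleNamespace

config = SimpleNamespace(intermediate_size=3072, hidden_size=768,
                         layer_norm_eps=1e-12, hidden_dropout_prob=0.0)

out = Blip2QFormerOutput(config)
ffn_hidden = torch.randn(2, 32, 3072)  # output of the intermediate (FFN) layer
residual = torch.randn(2, 32, 768)     # tensor the residual connection skips over

print(out(ffn_hidden, residual).shape)  # torch.Size([2, 32, 768])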
999
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
transformers.models.blip_2.modeling_blip_2.Blip2QFormerSelfOutput
import torch
from torch import nn

class Blip2QFormerSelfOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class Blip2QFormerSelfOutput(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
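Blip2QFormerSelfOutput follows the same residual-add-then-LayerNorm pattern as Blip2QFormerOutput above; the only difference is that its dense layer maps hidden_size to hidden_size, since it projects the attention context rather than the FFN output. A sketch with the same assumed sizes:

import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=768, layer_norm_eps=1e-12,
                         hidden_dropout_prob=0.0)

self_out = Blip2QFormerSelfOutput(config)
attn_out = torch.randn(2, 32, 768)  # attention context vectors
residual = torch.randn(2, 32, 768)  # pre-attention hidden states

print(self_out(attn_out, residual).shape)  # torch.Size([2, 32, 768])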