Dataset schema (column, type, and value range over the full dataset; ⌀ marks a nullable column). The rows below are records 1,500–1,517, one extracted Python class per record:

| Column | Type | Range |
|---|---|---|
| id | int64 | 0–328k |
| repository_name | string | length 7–58 |
| file_path | string | length 9–302 |
| class_name | string | length 5–256 |
| human_written_code | string | length 16–2.16M |
| class_skeleton | string ⌀ | length 18–1.49M |
| total_program_units | int64 | 1–1.76k |
| total_doc_str | int64 | 0–771 |
| AvgCountLine | float64 | 0–7.89k |
| AvgCountLineBlank | float64 | 0–297 |
| AvgCountLineCode | float64 | 0–7.89k |
| AvgCountLineComment | float64 | 0–7.89k |
| AvgCyclomatic | float64 | 0–130 |
| CommentToCodeRatio | float64 | 0–168 |
| CountClassBase | float64 | 0–40 |
| CountClassCoupled | float64 | 0–583 |
| CountClassCoupledModified | float64 | 0–575 |
| CountClassDerived | float64 | 0–5.35k |
| CountDeclInstanceMethod | float64 | 0–529 |
| CountDeclInstanceVariable | float64 | 0–296 |
| CountDeclMethod | float64 | 0–599 |
| CountDeclMethodAll | float64 | 0–1.12k |
| CountLine | float64 | 1–40.4k |
| CountLineBlank | float64 | 0–8.16k |
| CountLineCode | float64 | 1–25.7k |
| CountLineCodeDecl | float64 | 1–8.15k |
| CountLineCodeExe | float64 | 0–24.2k |
| CountLineComment | float64 | 0–16.5k |
| CountStmt | float64 | 1–9.71k |
| CountStmtDecl | float64 | 1–8.15k |
| CountStmtExe | float64 | 0–9.69k |
| MaxCyclomatic | float64 | 0–759 |
| MaxInheritanceTree | float64 | 0–16 |
| MaxNesting | float64 | 0–34 |
| SumCyclomatic | float64 | 0–2.9k |
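The metric columns follow the naming scheme of SciTools Understand's code metrics (CountLine*, CountStmt*, Cyclomatic, and so on), computed per extracted class. Below is a minimal sketch of how a dataset with this schema could be loaded and spot-checked with the Hugging Face `datasets` library; the dataset identifier is a placeholder, not the real one, and indexing by `id` assumes the rows are stored in id order.

```python
# Minimal sketch: load and spot-check a dataset with the schema above.
# "org/python-class-metrics" is a placeholder identifier, not the real
# name of this dataset.
from datasets import load_dataset

ds = load_dataset("org/python-class-metrics", split="train")

row = ds[1500]                    # assumes rows are ordered by id
print(row["repository_name"])     # e.g. huggingface/pytorch-pretrained-BERT
print(row["class_name"])          # fully qualified class name
print(row["human_written_code"])  # full class source plus the imports it uses
print(row["class_skeleton"])      # same class with method bodies replaced by `pass`

# Rough consistency check on the Understand-style metrics: the comment-to-code
# ratio should be close to comment lines divided by code lines.
approx = row["CountLineComment"] / max(row["CountLineCode"], 1)
assert abs(approx - row["CommentToCodeRatio"]) < 0.05
```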

id: 1,500
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioEncoder
human_written_code:
from typing import Callable, Optional, Union
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
import torch
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from torch import nn
class Data2VecAudioEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = Data2VecAudioPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList([Data2VecAudioEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
attention_mask = self._update_full_mask(attention_mask, hidden_states)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
class_skeleton:
class Data2VecAudioEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=41, AvgCountLineBlank=5, AvgCountLineCode=33, AvgCountLineComment=3, AvgCyclomatic=8, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=7, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=83, CountLineBlank=11, CountLineCode=67, CountLineCodeDecl=26, CountLineCodeExe=57, CountLineComment=5, CountStmt=45, CountStmtDecl=19, CountStmtExe=42, MaxCyclomatic=15, MaxInheritanceTree=1, MaxNesting=3, SumCyclomatic=16
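The class_skeleton field keeps the decorators, signatures, and docstrings of human_written_code and replaces every method body with `pass`. Below is a rough sketch of how such a skeleton could be derived with Python's `ast` module; this is an illustration under that assumption, not the actual pipeline used to build the dataset.

```python
import ast

def skeletonize(source: str) -> str:
    """Keep decorators, signatures, and docstrings; replace bodies with `pass`."""
    tree = ast.parse(source)

    class Stripper(ast.NodeTransformer):
        def visit_FunctionDef(self, node):
            self.generic_visit(node)       # visit nested defs first
            body = []
            if ast.get_docstring(node) is not None:
                body.append(node.body[0])  # keep the docstring expression
            body.append(ast.Pass())        # stand-in for the stripped body
            node.body = body
            return node

        visit_AsyncFunctionDef = visit_FunctionDef

    return ast.unparse(Stripper().visit(tree))

# Applied to the class definition in human_written_code, this yields a close
# approximation of class_skeleton (docstring quoting and the handling of
# nested helper defs may differ in detail).
```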

id: 1,501
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioEncoderLayer
human_written_code:
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
class Data2VecAudioEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = Data2VecAudioAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = Data2VecAudioFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class_skeleton:
class Data2VecAudioEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=16, AvgCountLineBlank=3, AvgCountLineCode=13, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=33, CountLineBlank=6, CountLineCode=27, CountLineCodeDecl=11, CountLineCodeExe=24, CountLineComment=0, CountStmt=20, CountStmtDecl=11, CountStmtExe=17, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3

id: 1,502
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioFeatureEncoder
human_written_code:
from torch import nn
class Data2VecAudioFeatureEncoder(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
self.conv_layers = nn.ModuleList([Data2VecAudioConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)])
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
class_skeleton:
class Data2VecAudioFeatureEncoder(nn.Module):
'''Construct the features from raw audio waveform'''
def __init__(self, config):
pass
def _freeze_parameters(self):
pass
def forward(self, input_values):
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=9, AvgCountLineBlank=1, AvgCountLineCode=8, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.16, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=35, CountLineBlank=6, CountLineCode=25, CountLineCodeDecl=10, CountLineCodeExe=21, CountLineComment=4, CountStmt=19, CountStmtDecl=10, CountStmtExe=15, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=7

id: 1,503
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioFeatureProjection
human_written_code:
from torch import nn
class Data2VecAudioFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
def forward(self, hidden_states):
norm_hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return (hidden_states, norm_hidden_states)
class_skeleton:
class Data2VecAudioFeatureProjection(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.09, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=11, CountLineCodeDecl=7, CountLineCodeExe=8, CountLineComment=1, CountStmt=11, CountStmtDecl=7, CountStmtExe=8, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 1,504
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioFeedForward
human_written_code:
from torch import nn
from ...activations import ACT2FN
class Data2VecAudioFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
class_skeleton:
class Data2VecAudioFeedForward(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=10, AvgCountLineBlank=2, AvgCountLineCode=9, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=22, CountLineBlank=4, CountLineCode=18, CountLineCodeDecl=8, CountLineCodeExe=15, CountLineComment=0, CountStmt=17, CountStmtDecl=8, CountStmtExe=14, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3

id: 1,505
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioForAudioFrameClassification
human_written_code:
from torch.nn import CrossEntropyLoss
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput
from torch import nn
import warnings
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available
import torch
from typing import Callable, Optional, Union
@auto_docstring
class Data2VecAudioForAudioFrameClassification(Data2VecAudioPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, 'add_adapter') and config.add_adapter:
raise ValueError('Audio frame classification does not support the use of Data2VecAudio adapters (config.add_adapter=True)')
self.data2vec_audio = Data2VecAudioModel(config)
num_layers = config.num_hidden_layers + 1
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.num_labels = config.num_labels
self.init_weights()
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.data2vec_audio.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.data2vec_audio.parameters():
param.requires_grad = False
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Data2VecAudioProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.data2vec_audio(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring
class Data2VecAudioForAudioFrameClassification(Data2VecAudioPreTrainedModel):
def __init__(self, config):
pass
def freeze_feature_extractor(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
def freeze_feature_encoder(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
def freeze_base_model(self):
'''
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
'''
pass
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Data2VecAudioProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
metrics: total_program_units=8, total_doc_str=4, AvgCountLine=18, AvgCountLineBlank=2, AvgCountLineCode=13, AvgCountLineComment=4, AvgCyclomatic=3, CommentToCodeRatio=0.27, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=4, CountDeclMethod=5, CountDeclMethodAll=8, CountLine=105, CountLineBlank=13, CountLineCode=73, CountLineCodeDecl=28, CountLineCodeExe=52, CountLineComment=20, CountStmt=39, CountStmtDecl=19, CountStmtExe=33, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=13

id: 1,506
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioForCTC
human_written_code:
from typing import Callable, Optional, Union
import torch
import warnings
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput
from torch import nn
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available
@auto_docstring(custom_intro='\n Data2VecAudio Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n ')
class Data2VecAudioForCTC(Data2VecAudioPreTrainedModel):
def __init__(self, config):
"""
target_lang (`str`, *optional*):
Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
adapter.<lang>.bin. Only relevant when using an instance of [`Data2VecAudioForCTC`] with adapters. Uses 'eng' by
default.
"""
super().__init__(config)
self.data2vec_audio = Data2VecAudioModel(config)
self.dropout = nn.Dropout(config.final_dropout)
if config.vocab_size is None:
raise ValueError(f"You are trying to instantiate {self.__class__} with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `Data2VecAudioForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.")
output_hidden_size = config.output_hidden_size if hasattr(config, 'add_adapter') and config.add_adapter else config.hidden_size
self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
self.post_init()
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.data2vec_audio.feature_extractor._freeze_parameters()
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and labels.max() >= self.config.vocab_size:
raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}')
outputs = self.data2vec_audio(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
attention_mask = attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return (loss,) + output if loss is not None else output
return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring(custom_intro='\n Data2VecAudio Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n ')
class Data2VecAudioForCTC(Data2VecAudioPreTrainedModel):
def __init__(self, config):
'''
target_lang (`str`, *optional*):
Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
adapter.<lang>.bin. Only relevant when using an instance of [`Data2VecAudioForCTC`] with adapters. Uses 'eng' by
default.
'''
pass
def freeze_feature_extractor(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
def freeze_feature_encoder(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
'''
pass
metrics: total_program_units=7, total_doc_str=4, AvgCountLine=27, AvgCountLineBlank=3, AvgCountLineCode=18, AvgCountLineComment=5, AvgCyclomatic=3, CommentToCodeRatio=0.26, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=3, CountDeclMethod=4, CountDeclMethodAll=7, CountLine=119, CountLineBlank=16, CountLineCode=82, CountLineCodeDecl=28, CountLineCodeExe=61, CountLineComment=21, CountStmt=37, CountStmtDecl=19, CountStmtExe=32, MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=12

id: 1,507
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioForSequenceClassification
human_written_code:
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput
import warnings
from typing import Callable, Optional, Union
from torch.nn import CrossEntropyLoss
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available
import torch
from torch import nn
@auto_docstring(custom_intro='\n Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ')
class Data2VecAudioForSequenceClassification(Data2VecAudioPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, 'add_adapter') and config.add_adapter:
raise ValueError('Sequence classification does not support the use of Data2VecAudio adapters (config.add_adapter=True)')
self.data2vec_audio = Data2VecAudioModel(config)
num_layers = config.num_hidden_layers + 1
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
self.post_init()
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.data2vec_audio.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.data2vec_audio.parameters():
param.requires_grad = False
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, SequenceClassifierOutput]:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Data2VecAudioProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.data2vec_audio(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring(custom_intro='\n Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ')
class Data2VecAudioForSequenceClassification(Data2VecAudioPreTrainedModel):
def __init__(self, config):
pass
def freeze_feature_extractor(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
'''
pass
def freeze_feature_encoder(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
def freeze_base_model(self):
'''
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
'''
pass
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, SequenceClassifierOutput]:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Data2VecAudioProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
metrics: total_program_units=8, total_doc_str=4, AvgCountLine=20, AvgCountLineBlank=2, AvgCountLineCode=14, AvgCountLineComment=4, AvgCyclomatic=3, CommentToCodeRatio=0.26, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=4, CountDeclMethod=5, CountDeclMethodAll=8, CountLine=114, CountLineBlank=14, CountLineCode=80, CountLineCodeDecl=31, CountLineCodeExe=59, CountLineComment=21, CountStmt=46, CountStmtDecl=22, CountStmtExe=40, MaxCyclomatic=8, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=15

id: 1,508
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioForXVector
human_written_code:
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available
import warnings
from torch import nn
import torch
@auto_docstring(custom_intro='\n Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n ')
class Data2VecAudioForXVector(Data2VecAudioPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.data2vec_audio = Data2VecAudioModel(config)
num_layers = config.num_hidden_layers + 1
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
self.tdnn = nn.ModuleList(tdnn_layers)
self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
self.init_weights()
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.data2vec_audio.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.data2vec_audio.parameters():
param.requires_grad = False
def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the TDNN layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return (input_length - kernel_size) // stride + 1
for kernel_size in self.config.tdnn_kernel:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
return input_lengths
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, XVectorOutput]:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Data2VecAudioProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.data2vec_audio(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
for tdnn_layer in self.tdnn:
hidden_states = tdnn_layer(hidden_states)
if attention_mask is None:
mean_features = hidden_states.mean(dim=1)
std_features = hidden_states.std(dim=1)
else:
feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
mean_features = []
std_features = []
for i, length in enumerate(tdnn_output_lengths):
mean_features.append(hidden_states[i, :length].mean(dim=0))
std_features.append(hidden_states[i, :length].std(dim=0))
mean_features = torch.stack(mean_features)
std_features = torch.stack(std_features)
statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
output_embeddings = self.feature_extractor(statistic_pooling)
logits = self.classifier(output_embeddings)
loss = None
if labels is not None:
loss = self.objective(logits, labels)
if not return_dict:
output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
return (loss,) + output if loss is not None else output
return XVectorOutput(loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring(custom_intro='\n Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n ')
class Data2VecAudioForXVector(Data2VecAudioPreTrainedModel):
def __init__(self, config):
pass
def freeze_feature_extractor(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
def freeze_feature_encoder(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
def freeze_base_model(self):
'''
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
'''
pass
def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
'''
Computes the output length of the TDNN layers
'''
pass
def _conv_out_length(input_length, kernel_size, stride):
pass
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, XVectorOutput]:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Data2VecAudioProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
metrics: total_program_units=10, total_doc_str=5, AvgCountLine=19, AvgCountLineBlank=3, AvgCountLineCode=13, AvgCountLineComment=4, AvgCyclomatic=3, CommentToCodeRatio=0.27, CountClassBase=1, CountClassCoupled=11, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=6, CountDeclInstanceVariable=7, CountDeclMethod=6, CountDeclMethodAll=9, CountLine=144, CountLineBlank=23, CountLineCode=96, CountLineCodeDecl=42, CountLineCodeExe=73, CountLineComment=26, CountStmt=63, CountStmtDecl=33, CountStmtExe=55, MaxCyclomatic=10, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=19

id: 1,509
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioModel
human_written_code:
from typing import Callable, Optional, Union
import torch
from .configuration_data2vec_audio import Data2VecAudioConfig
from torch import nn
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available
@auto_docstring
class Data2VecAudioModel(Data2VecAudioPreTrainedModel):
def __init__(self, config: Data2VecAudioConfig):
super().__init__(config)
self.config = config
self.feature_extractor = Data2VecAudioFeatureEncoder(config)
self.feature_projection = Data2VecAudioFeatureProjection(config)
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
self.encoder = Data2VecAudioEncoder(config)
self.adapter = Data2VecAudioAdapter(config) if config.add_adapter else None
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.feature_extractor._freeze_parameters()
def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
"""
if not getattr(self.config, 'apply_spec_augment', True):
return hidden_states
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
mask_feature_indices = _compute_mask_indices((batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Data2VecAudioBaseModelOutput]:
"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask, add_adapter=False)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask)
encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = encoder_outputs[0]
if self.adapter is not None:
hidden_states = self.adapter(hidden_states)
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return Data2VecAudioBaseModelOutput(last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
class_skeleton:
@auto_docstring
class Data2VecAudioModel(Data2VecAudioPreTrainedModel):
def __init__(self, config: Data2VecAudioConfig):
pass
def freeze_feature_encoder(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
'''
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
'''
pass
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Data2VecAudioBaseModelOutput]:
'''
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
'''
pass
metrics: total_program_units=7, total_doc_str=3, AvgCountLine=30, AvgCountLineBlank=4, AvgCountLineCode=22, AvgCountLineComment=4, AvgCyclomatic=4, CommentToCodeRatio=0.16, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=6, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=6, CountDeclMethod=4, CountDeclMethodAll=7, CountLine=130, CountLineBlank=20, CountLineCode=95, CountLineCodeDecl=30, CountLineCodeExe=69, CountLineComment=15, CountStmt=45, CountStmtDecl=16, CountStmtExe=40, MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=16

id: 1,510
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioPadLayer
human_written_code:
from torch import nn
class Data2VecAudioPadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, :-self.num_pad_remove]
return hidden_states
class_skeleton:
class Data2VecAudioPadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
pass
def forward(self, hidden_states):
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=4, AvgCountLineBlank=0, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=9, CountLineBlank=1, CountLineCode=8, CountLineCodeDecl=4, CountLineCodeExe=5, CountLineComment=0, CountStmt=8, CountStmtDecl=4, CountStmtExe=5, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4

id: 1,511
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioPositionalConvEmbedding
human_written_code:
from torch import nn
class Data2VecAudioPositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.layers = nn.ModuleList([Data2VecAudioPositionalConvLayer(config) for _ in range(config.num_conv_pos_embeddings)])
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
for layer in self.layers:
hidden_states = layer(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
class_skeleton:
class Data2VecAudioPositionalConvEmbedding(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=12, CountLineCodeDecl=6, CountLineCodeExe=9, CountLineComment=0, CountStmt=10, CountStmtDecl=5, CountStmtExe=7, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3

id: 1,512
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioPositionalConvLayer
human_written_code:
from torch import nn
from ...activations import ACT2FN
class Data2VecAudioPositionalConvLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=config.conv_pos_kernel_size, padding=config.conv_pos_kernel_size // 2, groups=config.num_conv_pos_embedding_groups)
self.padding = Data2VecAudioPadLayer(config.conv_pos_kernel_size)
self.activation = ACT2FN[config.feat_extract_activation]
self.layer_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.activation(hidden_states)
return hidden_states
class_skeleton:
class Data2VecAudioPositionalConvLayer(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=12, AvgCountLineBlank=1, AvgCountLineCode=10, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=25, CountLineBlank=3, CountLineCode=21, CountLineCodeDecl=7, CountLineCodeExe=18, CountLineComment=1, CountStmt=15, CountStmtDecl=7, CountStmtExe=12, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 1,513
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.Data2VecAudioPreTrainedModel
human_written_code:
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available
from .configuration_data2vec_audio import Data2VecAudioConfig
from typing import Callable, Optional, Union
import torch
import math
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
@auto_docstring
class Data2VecAudioPreTrainedModel(PreTrainedModel):
config: Data2VecAudioConfig
base_model_prefix = 'data2vec_audio'
main_input_name = 'input_values'
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, Data2VecAudioFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
nn.init.uniform_(module.projection.weight, a=-k, b=k)
nn.init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, Data2VecAudioPositionalConvLayer):
nn.init.constant_(module.conv.bias, 0)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
if module.bias is not None:
module.bias.data.zero_()
if module.weight is not None:
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool]=None):
"""
Computes the output length of the convolutional layers
"""
add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
def _conv_out_length(input_length, kernel_size, stride):
return torch.div(input_length - kernel_size, stride, rounding_mode='floor') + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
if add_adapter:
for _ in range(self.config.num_adapter_layers):
input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None):
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
output_lengths = output_lengths.to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask[torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
class_skeleton:
@auto_docstring
class Data2VecAudioPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool]=None):
'''
Computes the output length of the convolutional layers
'''
pass
def _conv_out_length(input_length, kernel_size, stride):
pass
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None):
pass
metrics: total_program_units=6, total_doc_str=2, AvgCountLine=17, AvgCountLineBlank=3, AvgCountLineCode=12, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.28, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=5, CountDeclInstanceMethod=3, CountDeclInstanceVariable=0, CountDeclMethod=3, CountDeclMethodAll=3, CountLine=82, CountLineBlank=14, CountLineCode=53, CountLineCodeDecl=21, CountLineCodeExe=44, CountLineComment=15, CountStmt=43, CountStmtDecl=17, CountStmtExe=38, MaxCyclomatic=10, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=17

id: 1,514
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_audio.py
class_name: transformers.models.data2vec.modeling_data2vec_audio.TDNNLayer
human_written_code:
from ...utils import auto_docstring, is_peft_available, is_torch_flex_attn_available
import warnings
import torch
from torch import nn
class TDNNLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
self.out_conv_dim = config.tdnn_dim[layer_id]
self.kernel_size = config.tdnn_kernel[layer_id]
self.dilation = config.tdnn_dilation[layer_id]
self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
self.activation = nn.ReLU()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if is_peft_available():
from peft.tuners.lora import LoraLayer
if is_peft_available():
if isinstance(self.kernel, LoraLayer):
warnings.warn("Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. You should exclude TDNNLayer from LoRA's target modules.")
hidden_states = hidden_states.transpose(1, 2)
weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.activation(hidden_states)
return hidden_states
class_skeleton:
class TDNNLayer(nn.Module):
def __init__(self, config, layer_id=0):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=14, AvgCountLineBlank=2, AvgCountLineCode=11, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.04, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=29, CountLineBlank=5, CountLineCode=23, CountLineCodeDecl=11, CountLineCodeExe=19, CountLineComment=1, CountStmt=20, CountStmtDecl=11, CountStmtExe=16, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=5

id: 1,515
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py
class_name: transformers.models.data2vec.modeling_data2vec_text.Data2VecTextAttention
human_written_code:
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch.nn as nn
from ...processing_utils import Unpack
import torch
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from typing import Callable, Optional, Union
from ...cache_utils import Cache, EncoderDecoderCache
class Data2VecTextAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = Data2VecTextCrossAttention if is_cross_attention else Data2VecTextSelfAttention
self.self = attention_class(config, position_embedding_type=position_embedding_type, is_causal=is_causal, layer_idx=layer_idx)
self.output = Data2VecTextSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self.output(attention_output, hidden_states)
return (attention_output, attn_weights)
class_skeleton:
class Data2VecTextAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 4 | 0 | 15 | 1 | 14 | 1 | 1 | 0.07 | 1 | 5 | 1 | 0 | 3 | 3 | 3 | 13 | 49 | 4 | 43 | 20 | 30 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4 |
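The `prune_heads` method above relies on two helpers from `transformers.pytorch_utils`. A small sketch of their effect on a projection's shape; head counts are invented:

```python
import torch.nn as nn
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer

num_heads, head_size = 12, 64
query = nn.Linear(num_heads * head_size, num_heads * head_size)

heads, index = find_pruneable_heads_and_indices([0, 3], num_heads, head_size, already_pruned_heads=set())
query = prune_linear_layer(query, index)  # keep only the rows of the surviving 10 heads

print(query.weight.shape)  # torch.Size([640, 768])
```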
1,516 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextClassificationHead |
import torch.nn as nn
import torch
class Data2VecTextClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
|
class Data2VecTextClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
| 3 | 1 | 8 | 0 | 8 | 1 | 2 | 0.12 | 1 | 1 | 0 | 0 | 2 | 3 | 2 | 12 | 20 | 2 | 17 | 8 | 14 | 2 | 15 | 8 | 12 | 2 | 1 | 0 | 3 |
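A hedged usage sketch for the head above; a `SimpleNamespace` stands in for the real `Data2VecTextConfig`, so the attribute values are assumptions:

```python
import torch
from types import SimpleNamespace
from transformers.models.data2vec.modeling_data2vec_text import Data2VecTextClassificationHead

config = SimpleNamespace(hidden_size=16, num_labels=3, classifier_dropout=None, hidden_dropout_prob=0.1)
head = Data2VecTextClassificationHead(config)

features = torch.randn(4, 10, 16)  # (batch, seq_len, hidden): encoder output
logits = head(features)            # pools the first (<s>) token, then projects
print(logits.shape)                # torch.Size([4, 3])
```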
1,517 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextEncoder |
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
import torch.nn as nn
from ...cache_utils import Cache, EncoderDecoderCache
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
class Data2VecTextEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([Data2VecTextLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_values, cache_position=cache_position, **kwargs)
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
|
class Data2VecTextEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
pass
| 3 | 0 | 45 | 4 | 41 | 0 | 9 | 0 | 1 | 8 | 2 | 0 | 2 | 3 | 2 | 12 | 91 | 8 | 83 | 26 | 68 | 0 | 35 | 14 | 32 | 17 | 1 | 3 | 18 |
1,518 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextForCausalLM |
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch.nn as nn
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils.generic import can_return_tuple, check_model_inputs
import torch
from ...generation import GenerationMixin
@auto_docstring(custom_intro='\n Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class Data2VecTextForCausalLM(Data2VecTextPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias']
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning('If you want to use `Data2VecTextLMHeadModel` as a standalone, add `is_decoder=True.`')
self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
self.lm_head = Data2VecTextLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
>>> config = Data2VecTextConfig.from_pretrained("facebook/data2vec-text-base")
>>> config.is_decoder = True
>>> model = Data2VecTextForCausalLM.from_pretrained("facebook/data2vec-text-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
if labels is not None:
use_cache = False
outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, return_dict=True, **kwargs)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class Data2VecTextForCausalLM(Data2VecTextPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
>>> config = Data2VecTextConfig.from_pretrained("facebook/data2vec-text-base")
>>> config.is_decoder = True
>>> model = Data2VecTextForCausalLM.from_pretrained("facebook/data2vec-text-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```'''
pass
| 8 | 1 | 25 | 3 | 15 | 7 | 2 | 0.44 | 2 | 6 | 3 | 0 | 5 | 2 | 5 | 6 | 136 | 22 | 79 | 34 | 54 | 35 | 32 | 16 | 26 | 6 | 2 | 1 | 12 |
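The `self.loss_function(...)` call above encapsulates the standard causal-LM objective. A sketch of that objective written out by hand, assuming the usual one-position shift and `-100` as the ignore index; shapes are invented:

```python
import torch
import torch.nn.functional as F

vocab_size = 50
logits = torch.randn(2, 7, vocab_size)         # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (2, 7))

shift_logits = logits[:, :-1, :].contiguous()  # position t predicts token t + 1
shift_labels = labels[:, 1:].contiguous()
loss = F.cross_entropy(shift_logits.view(-1, vocab_size), shift_labels.view(-1), ignore_index=-100)
```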
1,519 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextForMaskedLM |
from typing import Callable, Optional, Union
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils.generic import can_return_tuple, check_model_inputs
@auto_docstring
class Data2VecTextForMaskedLM(Data2VecTextPreTrainedModel):
_tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias']
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning('If you want to use `Data2VecTextForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
self.lm_head = Data2VecTextLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MaskedLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, return_dict=True, **kwargs)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(prediction_scores.device)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class Data2VecTextForMaskedLM(Data2VecTextPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MaskedLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
'''
pass
| 8 | 1 | 19 | 2 | 15 | 2 | 2 | 0.13 | 1 | 5 | 3 | 0 | 4 | 2 | 4 | 5 | 89 | 12 | 68 | 29 | 42 | 9 | 27 | 14 | 22 | 5 | 2 | 1 | 9 |
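A sketch of label preparation matching the docstring above: only masked positions keep their token ids, everything else becomes `-100`. The `<mask>` id below is an assumption for illustration:

```python
import torch

input_ids = torch.tensor([[101, 7592, 2088, 102]])
mask_positions = torch.tensor([[False, True, False, False]])
mask_token_id = 50264  # assumption: the tokenizer's <mask> id

labels = input_ids.clone()
labels[~mask_positions] = -100  # ignored by CrossEntropyLoss
masked_inputs = input_ids.masked_fill(mask_positions, mask_token_id)
```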
1,520 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextForMultipleChoice |
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Callable, Optional, Union
import torch
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
@auto_docstring
class Data2VecTextForMultipleChoice(Data2VecTextPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.data2vec_text = Data2VecTextModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MultipleChoiceModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.data2vec_text(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, return_dict=True, **kwargs)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(reshaped_logits.device)
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class Data2VecTextForMultipleChoice(Data2VecTextPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, MultipleChoiceModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
'''
pass
| 6 | 1 | 38 | 5 | 30 | 4 | 6 | 0.1 | 1 | 4 | 2 | 0 | 2 | 3 | 2 | 3 | 85 | 10 | 68 | 34 | 45 | 7 | 29 | 19 | 26 | 11 | 2 | 1 | 12 |
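The forward above folds the choice dimension into the batch before encoding and unfolds the logits afterwards for the loss. A pure-tensor sketch of that reshaping; sizes are invented:

```python
import torch

batch, num_choices, seq_len = 2, 4, 9
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))  # (batch * num_choices, seq_len)
logits = torch.randn(flat_input_ids.size(0), 1)          # stand-in for classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)           # (batch, num_choices), ready for CE loss
print(flat_input_ids.shape, reshaped_logits.shape)       # torch.Size([8, 9]) torch.Size([2, 4])
```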
1,521 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextForQuestionAnswering |
import torch
from ...utils.generic import can_return_tuple, check_model_inputs
import torch.nn as nn
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring
class Data2VecTextForQuestionAnswering(Data2VecTextPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, QuestionAnsweringModelOutput]:
outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class Data2VecTextForQuestionAnswering(Data2VecTextPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, QuestionAnsweringModelOutput]:
pass
| 6 | 0 | 41 | 5 | 30 | 7 | 4 | 0.19 | 1 | 4 | 2 | 0 | 2 | 3 | 2 | 3 | 90 | 10 | 67 | 30 | 45 | 13 | 32 | 16 | 29 | 7 | 2 | 2 | 8 |
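A sketch of the span loss computed above: the 2-dim logits are split into start/end scores, out-of-range gold positions are clamped to an ignored index, and the two cross-entropy terms are averaged. Sizes are invented:

```python
import torch
from torch.nn import CrossEntropyLoss

seq_len = 12
logits = torch.randn(2, seq_len, 2)                 # stand-in for qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits, end_logits = start_logits.squeeze(-1), end_logits.squeeze(-1)

start_positions = torch.tensor([3, 99]).clamp(0, seq_len)  # 99 lies outside the sequence
end_positions = torch.tensor([5, 99]).clamp(0, seq_len)
loss_fct = CrossEntropyLoss(ignore_index=seq_len)          # clamped positions contribute nothing
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
```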
1,522 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextForSequenceClassification |
import torch.nn as nn
from ...utils.generic import can_return_tuple, check_model_inputs
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class Data2VecTextForSequenceClassification(Data2VecTextPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
self.classifier = Data2VecTextClassificationHead(config)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, SequenceClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class Data2VecTextForSequenceClassification(Data2VecTextPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, SequenceClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 6 | 1 | 40 | 4 | 33 | 4 | 7 | 0.1 | 1 | 6 | 3 | 0 | 2 | 4 | 2 | 3 | 88 | 9 | 72 | 26 | 51 | 7 | 34 | 13 | 31 | 12 | 2 | 3 | 13 |
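A sketch of the three `problem_type` branches dispatched above, with invented shapes: regression uses MSE, single-label classification uses cross-entropy, multi-label uses BCE-with-logits:

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

batch, num_labels = 4, 3
logits = torch.randn(batch, num_labels)

# regression (num_labels == 1)
reg_loss = MSELoss()(torch.randn(batch, 1).squeeze(), torch.randn(batch))
# single-label classification (integer class labels)
ce_loss = CrossEntropyLoss()(logits.view(-1, num_labels), torch.randint(0, num_labels, (batch,)))
# multi-label classification (float multi-hot labels)
bce_loss = BCEWithLogitsLoss()(logits, torch.randint(0, 2, (batch, num_labels)).float())
```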
1,523 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextForTokenClassification |
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...processing_utils import Unpack
import torch
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils.generic import can_return_tuple, check_model_inputs
import torch.nn as nn
from typing import Callable, Optional, Union
@auto_docstring
class Data2VecTextForTokenClassification(Data2VecTextPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, TokenClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(logits.device)
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class Data2VecTextForTokenClassification(Data2VecTextPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, TokenClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 6 | 1 | 33 | 5 | 26 | 3 | 4 | 0.08 | 1 | 4 | 2 | 0 | 2 | 4 | 2 | 3 | 74 | 10 | 59 | 27 | 38 | 5 | 24 | 14 | 21 | 5 | 2 | 1 | 7 |
1,524 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextIntermediate |
from ...activations import ACT2FN, gelu
import torch
import torch.nn as nn
class Data2VecTextIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class Data2VecTextIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
1,525 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextLMHead |
import torch
from ...activations import ACT2FN, gelu
import torch.nn as nn
class Data2VecTextLMHead(nn.Module):
"""Data2VecText Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
x = self.decoder(x)
return x
def _tie_weights(self):
if self.decoder.bias.device.type == 'meta':
self.decoder.bias = self.bias
else:
self.bias = self.decoder.bias
|
class Data2VecTextLMHead(nn.Module):
'''Data2VecText Head for masked language modeling.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
def _tie_weights(self):
pass
| 4 | 1 | 8 | 1 | 6 | 1 | 1 | 0.21 | 1 | 1 | 0 | 0 | 3 | 4 | 3 | 13 | 29 | 6 | 19 | 9 | 15 | 4 | 18 | 9 | 14 | 2 | 1 | 1 | 4 |
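A sketch of the weight tying implied by the `_tied_weights_keys` entries on the models that use this head: with tying enabled, the decoder shares its parameter tensor with the input embedding matrix. Sizes are invented:

```python
import torch.nn as nn

vocab_size, hidden_size = 100, 16
word_embeddings = nn.Embedding(vocab_size, hidden_size)
decoder = nn.Linear(hidden_size, vocab_size)

decoder.weight = word_embeddings.weight  # one shared (vocab_size, hidden_size) tensor
assert decoder.weight.data_ptr() == word_embeddings.weight.data_ptr()
```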
1,526 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextLayer |
from ...modeling_layers import GradientCheckpointingLayer
from typing import Callable, Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...cache_utils import Cache, EncoderDecoderCache
import torch
import torch.nn as nn
class Data2VecTextLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = Data2VecTextAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = Data2VecTextAttention(config, position_embedding_type='absolute', is_causal=False, layer_idx=layer_idx, is_cross_attention=True)
self.intermediate = Data2VecTextIntermediate(config)
self.output = Data2VecTextOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
self_attention_output, _ = self.attention(hidden_states, attention_mask, head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_output, _ = self.crossattention(self_attention_output, None, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value=past_key_value, **kwargs)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class Data2VecTextLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4 | 0 | 27 | 2 | 23 | 2 | 4 | 0.1 | 1 | 7 | 3 | 0 | 3 | 8 | 3 | 13 | 84 | 9 | 70 | 32 | 57 | 7 | 41 | 23 | 37 | 7 | 1 | 2 | 11 |
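`apply_chunking_to_forward` above trades peak memory for sequential slices along the sequence dimension, with identical results. A minimal equivalence check:

```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

feed_forward = torch.nn.Linear(8, 8)  # stand-in for the intermediate/output pair
hidden_states = torch.randn(2, 12, 8)

full = feed_forward(hidden_states)
chunked = apply_chunking_to_forward(feed_forward, 4, 1, hidden_states)  # chunk_size=4, seq_len_dim=1
assert torch.allclose(full, chunked, atol=1e-6)
```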
1,527 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextModel |
from ...cache_utils import Cache, EncoderDecoderCache
import torch
from ...processing_utils import Unpack
import torch.nn as nn
from ...utils.generic import can_return_tuple, check_model_inputs
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...masking_utils import create_causal_mask
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
@auto_docstring
class Data2VecTextModel(Data2VecTextPreTrainedModel):
_no_split_modules = ['Data2VecTextEmbeddings', 'Data2VecTextLayer']
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = Data2VecTextEmbeddings(config)
self.encoder = Data2VecTextEncoder(config)
self.pooler = Data2VecTextPooler(config) if add_pooling_layer else None
self.position_embedding_type = config.position_embedding_type
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
return_legacy_cache = False
if use_cache and (not isinstance(past_key_values, Cache)):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
return_legacy_cache = True
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
attention_mask, encoder_attention_mask = self._create_attention_masks(input_shape=input_shape, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, embedding_output=embedding_output, encoder_hidden_states=encoder_hidden_states, cache_position=cache_position, past_key_values=past_key_values)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_ids=position_ids, **kwargs)
sequence_output = encoder_outputs.last_hidden_state
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if return_legacy_cache:
encoder_outputs.past_key_values = encoder_outputs.past_key_values.to_legacy_cache()
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values)
def _create_attention_masks(self, input_shape, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values):
if attention_mask is not None and attention_mask.dim() == 2:
if self.config.is_decoder:
attention_mask = create_causal_mask(config=self.config, input_embeds=embedding_output, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values)
else:
attention_mask = self._update_full_mask(attention_mask, embedding_output)
elif attention_mask is not None and attention_mask.dim() == 3:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if encoder_attention_mask is not None:
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, embedding_output.shape[:2], embedding_output)
else:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
return (attention_mask, encoder_attention_mask)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if 'flash' in self.config._attn_implementation:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
|
@auto_docstring
class Data2VecTextModel(Data2VecTextPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
pass
def _create_attention_masks(self, input_shape, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
pass
| 12 | 2 | 30 | 3 | 20 | 7 | 5 | 0.41 | 1 | 8 | 4 | 0 | 5 | 4 | 5 | 6 | 178 | 26 | 108 | 41 | 81 | 44 | 55 | 25 | 49 | 18 | 2 | 2 | 24 |
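A sketch of the 2-D to 4-D padding-mask expansion performed via `_prepare_4d_attention_mask` in `_update_full_mask` above: kept positions become 0, padded positions the dtype minimum, so the mask can be added to attention scores:

```python
import torch
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask

attention_mask = torch.tensor([[1, 1, 1, 0]])  # (batch, seq_len), last token is padding
mask_4d = _prepare_4d_attention_mask(attention_mask, torch.float32)

print(mask_4d.shape)     # torch.Size([1, 1, 4, 4])
print(mask_4d[0, 0, 0])  # 0.0 where attending is allowed, ~-3.4e38 (dtype min) on padding
```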
1,528 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextOutput |
import torch
import torch.nn as nn
class Data2VecTextOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class Data2VecTextOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
1,529 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextPooler |
import torch
import torch.nn as nn
class Data2VecTextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class Data2VecTextPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 5 | 1 | 1 | 0.2 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 10 | 7 | 7 | 2 | 10 | 7 | 7 | 1 | 1 | 0 | 2 |
1,530 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextPreTrainedModel |
from .configuration_data2vec_text import Data2VecTextConfig
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch.nn as nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
@auto_docstring
class Data2VecTextPreTrainedModel(PreTrainedModel):
config_class = Data2VecTextConfig
base_model_prefix = 'data2vec_text'
supports_gradient_checkpointing = True
_no_split_modules = ['Data2VecTextForTextEmbeddings', 'Data2VecTextLayer']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': Data2VecTextLayer, 'attentions': Data2VecTextSelfAttention, 'cross_attentions': Data2VecTextCrossAttention}
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
if hasattr(module, 'bias') and module.bias is not None:
module.bias.data.zero_()
if hasattr(module, 'weight') and module.weight is not None:
module.weight.data.fill_(1.0)
|
@auto_docstring
class Data2VecTextPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 17 | 0 | 14 | 3 | 8 | 0.37 | 1 | 0 | 0 | 7 | 1 | 0 | 1 | 1 | 28 | 2 | 19 | 6 | 17 | 7 | 17 | 6 | 15 | 8 | 1 | 2 | 8 |
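A sketch of what `_init_weights` above does for a single linear layer; in the real model, `post_init()` walks the module tree and applies it to each submodule. The `initializer_range` value is invented:

```python
import torch.nn as nn

initializer_range = 0.02  # invented; the real value comes from the model config
linear = nn.Linear(16, 16)
linear.weight.data.normal_(mean=0.0, std=initializer_range)
if linear.bias is not None:
    linear.bias.data.zero_()
```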
1,531 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py | transformers.models.data2vec.modeling_data2vec_text.Data2VecTextSelfAttention |
import torch
from ...cache_utils import Cache, EncoderDecoderCache
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch.nn as nn
from ...processing_utils import Unpack
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Callable, Optional, Union
class Data2VecTextSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size ** (-0.5)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_value is not None:
current_past_key_value = past_key_value
if isinstance(past_key_value, EncoderDecoderCache):
current_past_key_value = past_key_value.self_attention_cache
key_layer, value_layer = current_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
if self.position_embedding_type != 'absolute':
raise ValueError(f'You are using {self.config._attn_implementation} as attention type. However, non-absolute positional embeddings can not work with them. Please load the model with `attn_implementation="eager"`.')
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, head_mask=head_mask, use_cache=past_key_value is not None, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return (attn_output, attn_weights)
|
class Data2VecTextSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 43 | AvgCountLineBlank: 7 | AvgCountLineCode: 31 | AvgCountLineComment: 6 | AvgCyclomatic: 6 | CommentToCodeRatio: 0.19
| CountClassBase: 1 | CountClassCoupled: 5 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 11 | CountDeclMethod: 3 | CountDeclMethodAll: 13
| CountLine: 132 | CountLineBlank: 22 | CountLineCode: 93 | CountLineCodeDecl: 44 | CountLineCodeExe: 80 | CountLineComment: 18 | CountStmt: 72 | CountStmtDecl: 35 | CountStmtExe: 68
| MaxCyclomatic: 13 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 17
|
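For reference, a minimal sketch of what the eager attention path in the record above computes. The helper below is hypothetical (the real `eager_attention_forward` lives elsewhere in transformers); it only mirrors the calling convention: query/key/value arrive as (batch, num_heads, seq_len, head_dim) and the output comes back as (batch, seq_len, num_heads, head_dim) so the caller can `reshape(*input_shape, -1)`.

```python
import torch

def eager_attention_sketch(query, key, value, attention_mask=None, scaling=1.0, dropout=0.0):
    # query/key/value: (batch, num_heads, seq_len, head_dim)
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask  # additive mask: large negatives at masked positions
    attn_weights = torch.softmax(attn_weights, dim=-1)
    attn_weights = torch.nn.functional.dropout(attn_weights, p=dropout)
    attn_output = torch.matmul(attn_weights, value)
    # transpose back to (batch, seq_len, num_heads, head_dim) for the final reshape
    return attn_output.transpose(1, 2), attn_weights
```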
1,532
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_text.py
|
transformers.models.data2vec.modeling_data2vec_text.Data2VecTextSelfOutput
|
import torch
import torch.nn as nn
class Data2VecTextSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class Data2VecTextSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 5 | AvgCountLineBlank: 0 | AvgCountLineCode: 5 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 0
| CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 12 | CountLineBlank: 1 | CountLineCode: 11 | CountLineCodeDecl: 6 | CountLineCodeExe: 8 | CountLineComment: 0 | CountStmt: 11 | CountStmtDecl: 6 | CountStmtExe: 8
| MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2
|
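A quick shape check for the projection/residual block above, assuming the record's class is in scope; the config values are illustrative stand-ins for a real Data2VecTextConfig.

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=768, layer_norm_eps=1e-12, hidden_dropout_prob=0.1)  # illustrative
block = Data2VecTextSelfOutput(config)
attn_output = torch.randn(2, 16, 768)  # output of self-attention
block_input = torch.randn(2, 16, 768)  # residual branch
out = block(attn_output, block_input)  # dense -> dropout -> LayerNorm(x + residual)
assert out.shape == (2, 16, 768)
```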
1,533
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionAttention
|
from torch import nn
import torch
from typing import Optional, Union
from ...pytorch_utils import compile_compatible_method_lru_cache, find_pruneable_heads_and_indices, prune_linear_layer
from .configuration_data2vec_vision import Data2VecVisionConfig
class Data2VecVisionAttention(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
super().__init__()
self.attention = DATA2VEC_VISION_SELF_ATTENTION_CLASSES[config._attn_implementation](config, window_size=window_size)
self.output = Data2VecVisionSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional['Data2VecVisionRelativePositionBias']=None, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int]]=None) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias, interpolate_pos_encoding, resolution)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class Data2VecVisionAttention(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional['Data2VecVisionRelativePositionBias']=None, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int]]=None) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
pass
| total_program_units: 4 | total_doc_str: 0 | AvgCountLine: 14 | AvgCountLineBlank: 1 | AvgCountLineCode: 12 | AvgCountLineComment: 1 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.08
| CountClassBase: 1 | CountClassCoupled: 8 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 3 | CountDeclMethod: 3 | CountDeclMethodAll: 13
| CountLine: 44 | CountLineBlank: 6 | CountLineCode: 36 | CountLineCodeDecl: 19 | CountLineCodeExe: 24 | CountLineComment: 3 | CountStmt: 22 | CountStmtDecl: 11 | CountStmtExe: 18
| MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
|
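A short sketch of head pruning on the wrapper above, assuming the record's class is importable and using the default Data2VecVisionConfig (12 heads of size 64): `prune_heads` slices the Q/K/V and output projections so the module keeps a consistent, smaller head count.

```python
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()              # defaults: hidden_size=768, 12 attention heads
attention = Data2VecVisionAttention(config)
attention.prune_heads({0, 7})                # drop two heads
assert attention.attention.num_attention_heads == 10
assert attention.attention.all_head_size == 10 * 64
assert attention.pruned_heads == {0, 7}
```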
1,534
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionConvModule
|
import torch
from typing import Optional, Union
from torch import nn
class Data2VecVisionConvModule(nn.Module):
"""
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, tuple[int, int]], padding: Union[int, tuple[int, int], str]=0, bias: bool=False, dilation: Union[int, tuple[int, int]]=1) -> None:
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation)
self.bn = nn.BatchNorm2d(out_channels)
self.activation = nn.ReLU()
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = self.conv(input)
output = self.bn(output)
output = self.activation(output)
return output
|
class Data2VecVisionConvModule(nn.Module):
'''
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
'''
def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, tuple[int, int]], padding: Union[int, tuple[int, int], str]=0, bias: bool=False, dilation: Union[int, tuple[int, int]]=1) -> None:
pass
def forward(self, input: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 13 | AvgCountLineBlank: 1 | AvgCountLineCode: 13 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.19
| CountClassBase: 1 | CountClassCoupled: 5 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 35 | CountLineBlank: 4 | CountLineCode: 26 | CountLineCodeDecl: 15 | CountLineCodeExe: 15 | CountLineComment: 5 | CountStmt: 11 | CountStmtDecl: 7 | CountStmtExe: 8
| MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2
|
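A minimal usage sketch of the conv/BN/ReLU bundle above on a hypothetical feature map; with kernel_size=3 and padding=1 the spatial size is preserved.

```python
import torch

module = Data2VecVisionConvModule(in_channels=768, out_channels=256, kernel_size=3, padding=1)
features = torch.randn(1, 768, 32, 32)
out = module(features)                # Conv2d -> BatchNorm2d -> ReLU
assert out.shape == (1, 256, 32, 32)
```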
1,535
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionDropPath
|
from torch import nn
from typing import Optional, Union
import torch
class Data2VecVisionDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
|
class Data2VecVisionDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| total_program_units: 4 | total_doc_str: 1 | AvgCountLine: 2 | AvgCountLineBlank: 0 | AvgCountLineCode: 2 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.13
| CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 1 | CountDeclMethod: 3 | CountDeclMethodAll: 13
| CountLine: 12 | CountLineBlank: 3 | CountLineCode: 8 | CountLineCodeDecl: 5 | CountLineCodeExe: 4 | CountLineComment: 1 | CountStmt: 8 | CountStmtDecl: 5 | CountStmtExe: 4
| MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 3
|
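The module above delegates to a free function `drop_path` defined elsewhere in the same file; as a reference, a minimal sketch of the standard stochastic-depth operation it names: one Bernoulli keep decision per sample, with surviving activations rescaled by the keep probability so the expectation is unchanged.

```python
import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1 - drop_prob
    # one keep/drop decision per sample, broadcast over all remaining dims
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize: 1 with probability keep_prob, else 0
    return hidden_states.div(keep_prob) * random_tensor
```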
1,536
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionEmbeddings
|
from typing import Optional, Union
from .configuration_data2vec_vision import Data2VecVisionConfig
import collections.abc
import torch
from ...utils import auto_docstring, logging, torch_int
from torch import nn
import warnings
class Data2VecVisionEmbeddings(nn.Module):
"""
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
if config.use_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
else:
self.mask_token = None
self.patch_embeddings = Data2VecVisionPatchEmbeddings(config)
self.patch_size = config.patch_size
self.image_size = config.image_size if isinstance(config.image_size, collections.abc.Iterable) else (config.image_size, config.image_size)
num_patches = self.patch_embeddings.num_patches
if config.use_absolute_position_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
else:
self.position_embeddings = None
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: Optional[bool]=None) -> torch.Tensor:
if self.position_embeddings is not None and interpolate_pos_encoding is not None:
warnings.warn('`interpolate_pos_encoding` argument has no effect for Data2VecVisionEmbeddings, embeddings are always interpolated to the input image size. The argument will be removed in transformers v4.51.0.')
_, _, height, width = pixel_values.shape
embeddings, (patch_height, patch_width) = self.patch_embeddings(pixel_values)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1 - w) + mask_tokens * w
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
if self.position_embeddings is not None:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
embeddings = self.dropout(embeddings)
return (embeddings, (patch_height, patch_width))
|
class Data2VecVisionEmbeddings(nn.Module):
'''
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
'''
def __init__(self, config: Data2VecVisionConfig) -> None:
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: Optional[bool]=None) -> torch.Tensor:
pass
| total_program_units: 4 | total_doc_str: 2 | AvgCountLine: 30 | AvgCountLineBlank: 5 | AvgCountLineCode: 22 | AvgCountLineComment: 3 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.2
| CountClassBase: 1 | CountClassCoupled: 7 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 7 | CountDeclMethod: 3 | CountDeclMethodAll: 13
| CountLine: 99 | CountLineBlank: 20 | CountLineCode: 66 | CountLineCodeDecl: 31 | CountLineCodeExe: 57 | CountLineComment: 13 | CountStmt: 47 | CountStmtDecl: 26 | CountStmtExe: 43
| MaxCyclomatic: 5 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 11
|
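The core of `interpolate_pos_encoding` above is an image-style resize of the stored position grid. A self-contained sketch with hypothetical sizes: a 14x14 grid of patch position embeddings (224x224 training resolution, 16x16 patches) resized to 18x18 for a 288x288 input.

```python
import torch
from torch import nn

dim = 768
patch_pos_embed = torch.randn(1, 14 * 14, dim)                      # stored (num_patches, dim) grid
grid = patch_pos_embed.reshape(1, 14, 14, dim).permute(0, 3, 1, 2)  # to (1, dim, 14, 14)
grid = nn.functional.interpolate(grid, size=(18, 18), mode='bicubic', align_corners=False)
resized = grid.permute(0, 2, 3, 1).view(1, -1, dim)
assert resized.shape == (1, 324, dim)                               # 18 * 18 patch positions
```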
1,537
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionEncoder
|
from torch import nn
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, SemanticSegmenterOutput
from typing import Optional, Union
from .configuration_data2vec_vision import Data2VecVisionConfig
class Data2VecVisionEncoder(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
super().__init__()
self.config = config
self.has_relative_position_bias = config.use_shared_relative_position_bias
if self.has_relative_position_bias:
self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers, device='cpu')]
self.layer = nn.ModuleList([Data2VecVisionLayer(config, window_size=window_size if config.use_relative_position_bias else None, drop_path_rate=dpr[i]) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int, int]]=None, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
relative_position_bias = self.relative_position_bias(window_size, interpolate_pos_encoding=interpolate_pos_encoding, dim_size=hidden_states.shape[1])
else:
relative_position_bias = None
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, head_mask=layer_head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, interpolate_pos_encoding=interpolate_pos_encoding, resolution=resolution)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
|
class Data2VecVisionEncoder(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int, int]]=None, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 41 | AvgCountLineBlank: 4 | AvgCountLineCode: 36 | AvgCountLineComment: 1 | AvgCyclomatic: 7 | CommentToCodeRatio: 0.01
| CountClassBase: 1 | CountClassCoupled: 11 | CountClassCoupledModified: 4 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 4 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 83 | CountLineBlank: 9 | CountLineCode: 73 | CountLineCodeDecl: 25 | CountLineCodeExe: 61 | CountLineComment: 1 | CountStmt: 31 | CountStmtDecl: 16 | CountStmtExe: 28
| MaxCyclomatic: 11 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 14
|
1,538
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionFCNHead
|
import torch
from typing import Optional, Union
from torch import nn
from .configuration_data2vec_vision import Data2VecVisionConfig
class Data2VecVisionFCNHead(nn.Module):
"""
Fully Convolutional Networks for Semantic Segmentation. This head is an implementation of
[FCNNet](https://huggingface.co/papers/1411.4038).
Args:
config (Data2VecVisionConfig): Configuration.
in_index (int): Index of the encoder feature map to use. Default: 2.
kernel_size (int): The kernel size for convs in the head. Default: 3.
dilation (int): The dilation rate for convs in the head. Default: 1.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, config: Data2VecVisionConfig, in_index: int=2, kernel_size: int=3, dilation: Union[int, tuple[int, int]]=1) -> None:
super().__init__()
self.in_channels = config.hidden_size
self.channels = config.auxiliary_channels
self.num_convs = config.auxiliary_num_convs
self.concat_input = config.auxiliary_concat_input
self.in_index = in_index
conv_padding = kernel_size // 2 * dilation
convs = []
convs.append(Data2VecVisionConvModule(self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
for i in range(self.num_convs - 1):
convs.append(Data2VecVisionConvModule(self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
if self.num_convs == 0:
self.convs = nn.Identity()
else:
self.convs = nn.Sequential(*convs)
if self.concat_input:
self.conv_cat = Data2VecVisionConvModule(self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = encoder_hidden_states[self.in_index]
output = self.convs(hidden_states)
if self.concat_input:
output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
output = self.classifier(output)
return output
|
class Data2VecVisionFCNHead(nn.Module):
'''
Fully Convolutional Networks for Semantic Segmentation. This head is an implementation of
[FCNNet](https://huggingface.co/papers/1411.4038).
Args:
config (Data2VecVisionConfig): Configuration.
in_index (int): Index of the encoder feature map to use. Default: 2.
kernel_size (int): The kernel size for convs in the head. Default: 3.
dilation (int): The dilation rate for convs in the head. Default: 1.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
'''
def __init__(self, config: Data2VecVisionConfig, in_index: int=2, kernel_size: int=3, dilation: Union[int, tuple[int, int]]=1) -> None:
pass
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 23 | AvgCountLineBlank: 1 | AvgCountLineCode: 21 | AvgCountLineComment: 1 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.26
| CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 8 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 61 | CountLineBlank: 7 | CountLineCode: 43 | CountLineCodeDecl: 22 | CountLineCodeExe: 34 | CountLineComment: 11 | CountStmt: 26 | CountStmtDecl: 16 | CountStmtExe: 23
| MaxCyclomatic: 4 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 6
|
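A usage sketch for the auxiliary head above, assuming the record's classes are importable and the default Data2VecVisionConfig: the head reads the feature map selected by `in_index` from a list of encoder feature maps and produces per-pixel class logits.

```python
import torch
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()            # hidden_size=768, auxiliary_channels=256
head = Data2VecVisionFCNHead(config)       # in_index=2 by default
features = [torch.randn(1, 768, 14, 14) for _ in range(4)]
logits = head(features)                    # consumes features[2]; padding keeps 14x14
assert logits.shape == (1, config.num_labels, 14, 14)
```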
1,539
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionForImageClassification
|
from .configuration_data2vec_vision import Data2VecVisionConfig
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, SemanticSegmenterOutput
from ...utils import auto_docstring, logging, torch_int
import torch
from torch import nn
@auto_docstring(custom_intro='\n Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of\n the final hidden states of the patch tokens) e.g. for ImageNet.\n ')
class Data2VecVisionForImageClassification(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=True)
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.data2vec_vision(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of\n the final hidden states of the patch tokens) e.g. for ImageNet.\n ')
class Data2VecVisionForImageClassification(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| total_program_units: 5 | total_doc_str: 1 | AvgCountLine: 37 | AvgCountLineBlank: 4 | AvgCountLineCode: 29 | AvgCountLineComment: 4 | AvgCyclomatic: 8 | CommentToCodeRatio: 0.12
| CountClassBase: 1 | CountClassCoupled: 8 | CountClassCoupledModified: 3 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 3
| CountLine: 82 | CountLineBlank: 9 | CountLineCode: 65 | CountLineCodeDecl: 22 | CountLineCodeExe: 46 | CountLineComment: 8 | CountStmt: 32 | CountStmtDecl: 12 | CountStmtExe: 29
| MaxCyclomatic: 13 | MaxInheritanceTree: 2 | MaxNesting: 3 | SumCyclomatic: 15
|
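A usage sketch for the classification head, in the style of the segmentation doctest in the next record; the checkpoint name is assumed to be the ImageNet-1k fine-tuned data2vec-vision checkpoint on the Hub.

```python
from transformers import AutoImageProcessor, Data2VecVisionForImageClassification
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k")
model = Data2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k")
inputs = image_processor(images=image, return_tensors="pt")
logits = model(**inputs).logits                        # (batch_size, num_labels)
print(model.config.id2label[logits.argmax(-1).item()])
```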
1,540
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionForSemanticSegmentation
|
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, SemanticSegmenterOutput
from .configuration_data2vec_vision import Data2VecVisionConfig
import torch
from torch.nn import CrossEntropyLoss
from torch import nn
from ...utils import auto_docstring, logging, torch_int
@auto_docstring
class Data2VecVisionForSemanticSegmentation(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=False)
if len(self.config.out_indices) != 4:
raise ValueError('Data2VecVisionForSemanticSegmentation requires config.out_indices to be a list of 4 integers, specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of a base-sized architecture.')
self.fpn1 = nn.Sequential(nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2), nn.BatchNorm2d(config.hidden_size), nn.GELU(), nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2))
self.fpn2 = nn.Sequential(nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2))
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.decode_head = Data2VecVisionUperHead(config)
self.auxiliary_head = Data2VecVisionFCNHead(config) if config.use_auxiliary_head else None
self.post_init()
def compute_loss(self, logits, auxiliary_logits, labels):
upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
if auxiliary_logits is not None:
upsampled_auxiliary_logits = nn.functional.interpolate(auxiliary_logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
main_loss = loss_fct(upsampled_logits, labels)
loss = main_loss
if auxiliary_logits is not None:
auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
loss += self.config.auxiliary_loss_weight * auxiliary_loss
return loss
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, Data2VecVisionForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base")
>>> model = Data2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
if labels is not None and self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one')
outputs = self.data2vec_vision(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
batch_size = pixel_values.shape[0]
patch_resolution = self.config.image_size // self.config.patch_size
features = [x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features]
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
logits = self.decode_head(features)
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(features)
loss = None
if labels is not None:
loss = self.compute_loss(logits, auxiliary_logits, labels)
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
|
@auto_docstring
class Data2VecVisionForSemanticSegmentation(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig) -> None:
pass
def compute_loss(self, logits, auxiliary_logits, labels):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, Data2VecVisionForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base")
>>> model = Data2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```'''
pass
| total_program_units: 6 | total_doc_str: 1 | AvgCountLine: 47 | AvgCountLineBlank: 7 | AvgCountLineCode: 31 | AvgCountLineComment: 9 | AvgCyclomatic: 6 | CommentToCodeRatio: 0.29
| CountClassBase: 1 | CountClassCoupled: 12 | CountClassCoupledModified: 5 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 8 | CountDeclMethod: 3 | CountDeclMethodAll: 4
| CountLine: 145 | CountLineBlank: 23 | CountLineCode: 95 | CountLineCodeDecl: 39 | CountLineCodeExe: 80 | CountLineComment: 28 | CountStmt: 52 | CountStmtDecl: 29 | CountStmtExe: 48
| MaxCyclomatic: 12 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 18
|
1,541
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionIntermediate
|
import torch
from torch import nn
from .configuration_data2vec_vision import Data2VecVisionConfig
from ...activations import ACT2FN
class Data2VecVisionIntermediate(nn.Module):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class Data2VecVisionIntermediate(nn.Module):
def __init__(self, config: Data2VecVisionConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 6 | AvgCountLineBlank: 1 | AvgCountLineCode: 6 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0
| CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 2 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 14 | CountLineBlank: 2 | CountLineCode: 12 | CountLineCodeDecl: 5 | CountLineCodeExe: 9 | CountLineComment: 0 | CountStmt: 11 | CountStmtDecl: 5 | CountStmtExe: 8
| MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 3
|
1,542
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionLayer
|
from torch import nn
import torch
from ...modeling_layers import GradientCheckpointingLayer
from typing import Optional, Union
from .configuration_data2vec_vision import Data2VecVisionConfig
class Data2VecVisionLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None, drop_path_rate: float=0.0) -> None:
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = Data2VecVisionAttention(config, window_size=window_size)
self.intermediate = Data2VecVisionIntermediate(config)
self.output = Data2VecVisionOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.drop_path = Data2VecVisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
init_values = config.layer_scale_init_value
if init_values > 0:
self.lambda_1 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
self.lambda_2 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
else:
self.lambda_1, self.lambda_2 = (None, None)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int, int]]=None) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
self_attention_outputs = self.attention(self.layernorm_before(hidden_states), head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, interpolate_pos_encoding=interpolate_pos_encoding, resolution=resolution)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
if self.lambda_1 is not None:
attention_output = self.lambda_1 * attention_output
hidden_states = self.drop_path(attention_output) + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
if self.lambda_2 is not None:
layer_output = self.lambda_2 * layer_output
layer_output = self.drop_path(layer_output) + hidden_states
outputs = (layer_output,) + outputs
return outputs
|
class Data2VecVisionLayer(GradientCheckpointingLayer):
'''This corresponds to the Block class in the timm implementation.'''
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None, drop_path_rate: float=0.0) -> None:
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int, int]]=None) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
pass
| total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 31 | AvgCountLineBlank: 5 | AvgCountLineCode: 24 | AvgCountLineComment: 3 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.14
| CountClassBase: 1 | CountClassCoupled: 11 | CountClassCoupledModified: 5 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 10 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 65 | CountLineBlank: 11 | CountLineCode: 49 | CountLineCodeDecl: 28 | CountLineCodeExe: 36 | CountLineComment: 7 | CountStmt: 31 | CountStmtDecl: 18 | CountStmtExe: 28
| MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 6
|
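The `lambda_1`/`lambda_2` parameters above implement LayerScale: a learnable per-channel gain on each residual branch, initialized to a small constant so the block starts close to the identity. In isolation, with hypothetical sizes:

```python
import torch
from torch import nn

hidden_size, init_value = 768, 0.1
lambda_1 = nn.Parameter(init_value * torch.ones(hidden_size))
branch_output = torch.randn(2, 197, hidden_size)  # e.g. the attention-branch output
scaled = lambda_1 * branch_output                 # broadcasts over (batch, seq)
assert scaled.shape == branch_output.shape
```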
1,543
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionModel
|
from ...utils import auto_docstring, logging, torch_int
import torch
from torch import nn
from .configuration_data2vec_vision import Data2VecVisionConfig
from typing import Optional, Union
@auto_docstring
class Data2VecVisionModel(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool=False) -> None:
"""
add_pooling_layer (bool, *optional*, defaults to `False`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = Data2VecVisionEmbeddings(config)
self.encoder = Data2VecVisionEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
self.layernorm = nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = Data2VecVisionPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Data2VecVisionModelOutputWithPooling]:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
resolution = pixel_values.shape[2:]
encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, resolution=resolution, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
return head_outputs + encoder_outputs[1:]
return Data2VecVisionModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class Data2VecVisionModel(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool=False) -> None:
'''
add_pooling_layer (bool, *optional*, defaults to `False`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
'''
pass
@auto_docstring
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Data2VecVisionModelOutputWithPooling]:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
'''
pass
| total_program_units: 7 | total_doc_str: 3 | AvgCountLine: 20 | AvgCountLineBlank: 2 | AvgCountLineCode: 14 | AvgCountLineComment: 4 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.22
| CountClassBase: 1 | CountClassCoupled: 9 | CountClassCoupledModified: 5 | CountClassDerived: 0 | CountDeclInstanceMethod: 4 | CountDeclInstanceVariable: 5 | CountDeclMethod: 4 | CountDeclMethodAll: 5
| CountLine: 90 | CountLineBlank: 11 | CountLineCode: 65 | CountLineCodeDecl: 27 | CountLineCodeExe: 43 | CountLineComment: 14 | CountStmt: 29 | CountStmtDecl: 17 | CountStmtExe: 24
| MaxCyclomatic: 7 | MaxInheritanceTree: 2 | MaxNesting: 1 | SumCyclomatic: 13
|
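A usage sketch for the bare model, mirroring the doctest style used elsewhere in this file; with a 224x224 input and 16x16 patches the encoder sees 196 patch tokens plus one [CLS] token.

```python
import torch
from transformers import AutoImageProcessor, Data2VecVisionModel
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image_processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base")
model = Data2VecVisionModel.from_pretrained("facebook/data2vec-vision-base")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
assert outputs.last_hidden_state.shape == (1, 197, model.config.hidden_size)
```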
1,544
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionModelOutputWithPooling
|
from ...utils import auto_docstring, logging, torch_int
from dataclasses import dataclass
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, SemanticSegmenterOutput
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`Data2VecVisionModel`].\n ')
class Data2VecVisionModelOutputWithPooling(BaseModelOutputWithPooling):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
will be returned.
"""
|
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`Data2VecVisionModel`].\n ')
class Data2VecVisionModelOutputWithPooling(BaseModelOutputWithPooling):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
will be returned.
'''
pass
| total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 0 | AvgCountLineBlank: 0 | AvgCountLineCode: 0 | AvgCountLineComment: 0 | AvgCyclomatic: 0 | CommentToCodeRatio: 19
| CountClassBase: 1 | CountClassCoupled: 0 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 0 | CountDeclInstanceVariable: 0 | CountDeclMethod: 0 | CountDeclMethodAll: 0
| CountLine: 23 | CountLineBlank: 3 | CountLineCode: 1 | CountLineCodeDecl: 1 | CountLineCodeExe: 0 | CountLineComment: 19 | CountStmt: 1 | CountStmtDecl: 1 | CountStmtExe: 0
| MaxCyclomatic: 0 | MaxInheritanceTree: 2 | MaxNesting: 0 | SumCyclomatic: 0
|
1,545
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionOutput
|
import torch
from torch import nn
from .configuration_data2vec_vision import Data2VecVisionConfig
class Data2VecVisionOutput(nn.Module):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class Data2VecVisionOutput(nn.Module):
def __init__(self, config: Data2VecVisionConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 5 | AvgCountLineBlank: 1 | AvgCountLineCode: 4 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 0
| CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 2 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 11 | CountLineBlank: 2 | CountLineCode: 9 | CountLineCodeDecl: 5 | CountLineCodeExe: 6 | CountLineComment: 0 | CountStmt: 9 | CountStmtDecl: 5 | CountStmtExe: 6
| MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2
|
1,546
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionPatchEmbeddings
|
import torch
import collections.abc
from torch import nn
class Data2VecVisionPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.hidden_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.patch_shape = patch_shape
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
embeddings = self.projection(pixel_values)
patch_height, patch_width = (embeddings.shape[2], embeddings.shape[3])
embeddings = embeddings.flatten(2).transpose(1, 2)
return (embeddings, (patch_height, patch_width))
|
class Data2VecVisionPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 22 | AvgCountLineBlank: 3 | AvgCountLineCode: 18 | AvgCountLineComment: 1 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.16
| CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 6 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 51 | CountLineBlank: 8 | CountLineCode: 37 | CountLineCodeDecl: 20 | CountLineCodeExe: 30 | CountLineComment: 6 | CountStmt: 27 | CountStmtDecl: 16 | CountStmtExe: 24
| MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 6
|
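A shape walkthrough for the patch projection above, assuming the record's class is importable and the default Data2VecVisionConfig (224x224 images, 16x16 patches, 3 channels):

```python
import torch
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()
patch_embed = Data2VecVisionPatchEmbeddings(config)
pixel_values = torch.randn(1, 3, 224, 224)
embeddings, (patch_height, patch_width) = patch_embed(pixel_values)
assert (patch_height, patch_width) == (14, 14)            # 224 // 16 per side
assert embeddings.shape == (1, 196, config.hidden_size)   # 14 * 14 patch tokens
```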
1,547
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionPooler
|
import torch
from torch import nn
from .configuration_data2vec_vision import Data2VecVisionConfig
class Data2VecVisionPooler(nn.Module):
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if self.layernorm is not None:
patch_tokens = hidden_states[:, 1:, :]
pooled_output = self.layernorm(patch_tokens.mean(1))
else:
pooled_output = hidden_states[:, 0]
return pooled_output
|
class Data2VecVisionPooler(nn.Module):
def __init__(self, config: Data2VecVisionConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 8 | AvgCountLineBlank: 1 | AvgCountLineCode: 6 | AvgCountLineComment: 1 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.15
| CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 1 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 17 | CountLineBlank: 2 | CountLineCode: 13 | CountLineCodeDecl: 6 | CountLineCodeExe: 10 | CountLineComment: 2 | CountStmt: 10 | CountStmtDecl: 6 | CountStmtExe: 7
| MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
|
1,548
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionPreTrainedModel
|
from ...utils import auto_docstring, logging, torch_int
from .configuration_data2vec_vision import Data2VecVisionConfig
from ...modeling_utils import PreTrainedModel
from torch import nn
@auto_docstring
class Data2VecVisionPreTrainedModel(PreTrainedModel):
config: Data2VecVisionConfig
base_model_prefix = 'data2vec_vision'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['Data2VecVisionLayer']
_keys_to_ignore_on_load_unexpected = ['.*relative_position_index.*']
_supports_sdpa = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, Data2VecVisionEmbeddings):
module.cls_token.data.zero_()
if module.mask_token is not None:
module.mask_token.data.zero_()
if module.position_embeddings is not None:
module.position_embeddings.data.zero_()
elif isinstance(module, Data2VecVisionRelativePositionBias):
module.relative_position_bias_table.data.zero_()
elif isinstance(module, Data2VecVisionLayer):
if module.lambda_1 is not None:
module.lambda_1.data.fill_(self.config.layer_scale_init_value)
module.lambda_2.data.fill_(self.config.layer_scale_init_value)
|
@auto_docstring
class Data2VecVisionPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 15 | AvgCountLineBlank: 0 | AvgCountLineCode: 12 | AvgCountLineComment: 3 | AvgCyclomatic: 6 | CommentToCodeRatio: 0.35
| CountClassBase: 1 | CountClassCoupled: 0 | CountClassCoupledModified: 0 | CountClassDerived: 3 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 0 | CountDeclMethod: 1 | CountDeclMethodAll: 1
| CountLine: 29 | CountLineBlank: 2 | CountLineCode: 20 | CountLineCodeDecl: 9 | CountLineCodeExe: 18 | CountLineComment: 7 | CountStmt: 18 | CountStmtDecl: 9 | CountStmtExe: 16
| MaxCyclomatic: 6 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 6
|
1,549
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionPyramidPoolingBlock
|
from torch import nn
import torch
class Data2VecVisionPyramidPoolingBlock(nn.Module):
def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
super().__init__()
self.layers = [nn.AdaptiveAvgPool2d(pool_scale), Data2VecVisionConvModule(in_channels, channels, kernel_size=1)]
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: torch.Tensor) -> torch.Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
|
class Data2VecVisionPyramidPoolingBlock(nn.Module):
def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
pass
def forward(self, input: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 7 | AvgCountLineBlank: 0 | AvgCountLineCode: 7 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0
| CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 1 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 15 | CountLineBlank: 1 | CountLineCode: 14 | CountLineCodeDecl: 7 | CountLineCodeExe: 11 | CountLineComment: 0 | CountStmt: 11 | CountStmtDecl: 7 | CountStmtExe: 8
| MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
|
1,550
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionPyramidPoolingModule
|
import torch
from torch import nn
class Data2VecVisionPyramidPoolingModule(nn.Module):
"""
Pyramid Pooling Module (PPM) used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
align_corners (bool): align_corners argument of F.interpolate.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, pool_scales: tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
super().__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.blocks = []
for i, pool_scale in enumerate(pool_scales):
block = Data2VecVisionPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
self.blocks.append(block)
self.add_module(str(i), block)
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
ppm_outs = []
for ppm in self.blocks:
ppm_out = ppm(x)
upsampled_ppm_out = nn.functional.interpolate(ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
|
class Data2VecVisionPyramidPoolingModule(nn.Module):
'''
Pyramid Pooling Module (PPM) used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
align_corners (bool): align_corners argument of F.interpolate.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
'''
def __init__(self, pool_scales: tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
pass
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
pass
| total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 11 | AvgCountLineBlank: 0 | AvgCountLineCode: 11 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.43
| CountClassBase: 1 | CountClassCoupled: 7 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 5 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 37 | CountLineBlank: 4 | CountLineCode: 23 | CountLineCodeDecl: 14 | CountLineCodeExe: 20 | CountLineComment: 10 | CountStmt: 19 | CountStmtDecl: 14 | CountStmtExe: 16
| MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
|
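A shape sketch for the pyramid pooling module above with the PSPNet default scales (hypothetical channel counts): each branch pools the input down to (scale, scale), projects it, and upsamples it back to the input resolution, so the four outputs can be concatenated channel-wise.

```python
import torch

ppm = Data2VecVisionPyramidPoolingModule(pool_scales=(1, 2, 3, 6), in_channels=768, channels=512, align_corners=False)
x = torch.randn(1, 768, 14, 14)
outs = ppm(x)
assert len(outs) == 4
assert all(o.shape == (1, 512, 14, 14) for o in outs)
```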
1,551
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionRelativePositionBias
|
from torch import nn
import torch
from ...utils import auto_docstring, logging, torch_int
from ...pytorch_utils import compile_compatible_method_lru_cache, find_pruneable_heads_and_indices, prune_linear_layer
from .configuration_data2vec_vision import Data2VecVisionConfig
class Data2VecVisionRelativePositionBias(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: tuple) -> None:
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(torch.zeros(self.num_relative_distance, config.num_attention_heads))
@compile_compatible_method_lru_cache(maxsize=10)
def generate_relative_position_index(self, window_size: tuple[int, int]) -> torch.Tensor:
"""
This method creates the relative position index, modified to support arbitrary window sizes,
as introduced in [MiDaS v3.1](https://huggingface.co/papers/2307.14460).
"""
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
window_area = window_size[0] * window_size[1]
grid = torch.meshgrid(torch.arange(window_size[0]), torch.arange(window_size[1]), indexing='ij')
coords = torch.stack(grid)
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += window_size[0] - 1
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1)
relative_position_index[0, 0:] = num_relative_distance - 3
relative_position_index[0:, 0] = num_relative_distance - 2
relative_position_index[0, 0] = num_relative_distance - 1
return relative_position_index
def forward(self, window_size, interpolate_pos_encoding: bool=False, dim_size=None) -> torch.Tensor:
"""
Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
"""
old_height = 2 * self.window_size[0] - 1
old_width = 2 * self.window_size[1] - 1
new_height = 2 * window_size[0] - 1
new_width = 2 * window_size[1] - 1
old_relative_position_bias_table = self.relative_position_bias_table
old_num_relative_distance = self.num_relative_distance
new_num_relative_distance = new_height * new_width + 3
old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3]
old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
new_sub_table = nn.functional.interpolate(old_sub_table, size=(torch_int(new_height), torch_int(new_width)), mode='bilinear')
new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)
new_relative_position_bias_table = torch.cat([new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]])
relative_position_index = self.generate_relative_position_index(window_size)
relative_position_bias = new_relative_position_bias_table[relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
if interpolate_pos_encoding:
relative_position_bias = nn.functional.interpolate(relative_position_bias.unsqueeze(1), size=(dim_size, dim_size), mode='bilinear', align_corners=False).squeeze(1)
return relative_position_bias.unsqueeze(0)
|
class Data2VecVisionRelativePositionBias(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: tuple) -> None:
pass
@compile_compatible_method_lru_cache(maxsize=10)
def generate_relative_position_index(self, window_size: tuple[int, int]) -> torch.Tensor:
'''
This method creates the relative position index, modified to support arbitrary window sizes,
as introduced in [MiDaS v3.1](https://huggingface.co/papers/2307.14460).
'''
pass
def forward(self, window_size, interpolate_pos_encoding: bool=False, dim_size=None) -> torch.Tensor:
'''
Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
'''
pass
| total_program_units: 5 | total_doc_str: 2 | AvgCountLine: 27 | AvgCountLineBlank: 4 | AvgCountLineCode: 19 | AvgCountLineComment: 6 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.32
| CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 4 | CountDeclMethod: 3 | CountDeclMethodAll: 13
| CountLine: 84 | CountLineBlank: 13 | CountLineCode: 59 | CountLineCodeDecl: 27 | CountLineCodeExe: 55 | CountLineComment: 19 | CountStmt: 46 | CountStmtDecl: 27 | CountStmtExe: 42
| MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 5
|
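A tiny worked example for the index construction above, assuming the record's class is importable and the default Data2VecVisionConfig. For a 2x2 window the bias table holds (2*2-1)*(2*2-1) + 3 = 12 rows: 9 for the pairwise patch offsets plus 3 special rows for [CLS]-to-patch, patch-to-[CLS], and [CLS]-to-[CLS].

```python
import torch
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()
bias = Data2VecVisionRelativePositionBias(config, window_size=(2, 2))
assert bias.num_relative_distance == 12
index = bias.generate_relative_position_index((2, 2))
assert index.shape == (5, 5)            # 4 patch tokens + [CLS]
assert int(index.max()) == 11           # indices address all 12 table rows
```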
1,552
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
|
transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionSdpaSelfAttention
|
import torch
from typing import Optional, Union
import math
class Data2VecVisionSdpaSelfAttention(Data2VecVisionSelfAttention):
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int]]=None) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
if output_attentions or head_mask is not None:
logger.warning_once('`Data2VecVisionSdpaSelfAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
return super().forward(hidden_states=hidden_states, head_mask=head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, interpolate_pos_encoding=interpolate_pos_encoding, resolution=resolution)
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attn_bias = None
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
attn_bias = self.relative_position_bias(window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1])
if relative_position_bias is not None:
if attn_bias is None:
attn_bias = relative_position_bias
else:
attn_bias += relative_position_bias
scaling = 1 / math.sqrt(self.attention_head_size)
context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attn_bias, dropout_p=self.config.attention_probs_dropout_prob if self.training else 0.0, is_causal=False, scale=scaling)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return (context_layer, None)
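
# Editor's sketch (illustrative, with assumed toy shapes): the SDPA path above
# passes the relative position bias as an additive float `attn_mask`, which is
# numerically equivalent to adding it to the raw attention scores in the eager
# implementation.
import math
import torch

q, k, v = (torch.randn(2, 4, 10, 8) for _ in range(3))   # (batch, heads, seq, head_dim)
bias = torch.randn(1, 4, 10, 10)                          # broadcastable additive bias

sdpa_out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=bias)
scores = q @ k.transpose(-1, -2) / math.sqrt(8) + bias
eager_out = scores.softmax(dim=-1) @ v
print(torch.allclose(sdpa_out, eager_out, atol=1e-5))     # True, up to float error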
class_skeleton:
class Data2VecVisionSdpaSelfAttention(Data2VecVisionSelfAttention):
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int]]=None) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
pass
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=59, AvgCountLineBlank=4, AvgCountLineCode=54, AvgCountLineComment=1, AvgCyclomatic=6, CommentToCodeRatio=0.02, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=14, CountLine=60, CountLineBlank=4, CountLineCode=55, CountLineCodeDecl=20, CountLineCodeExe=45, CountLineComment=1, CountStmt=24, CountStmtDecl=12, CountStmtExe=22, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=6

id: 1553
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
class_name: transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionSelfAttention
human_written_code:
from .configuration_data2vec_vision import Data2VecVisionConfig
from typing import Optional, Union
import torch
import math
from torch import nn
class Data2VecVisionSelfAttention(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.has_relative_position_bias = bool(window_size)
if self.has_relative_position_bias:
self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int]]=None) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
attention_scores = attention_scores + self.relative_position_bias(window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1])
if relative_position_bias is not None:
attention_scores = attention_scores + relative_position_bias
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
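
# Editor's sketch (illustrative, toy sizes assumed): the view/transpose pattern
# used above splits (batch, seq, hidden) into per-head tensors and merges them
# back after attention without copying data until `.contiguous()`.
import torch

batch, seq, heads, head_dim = 2, 5, 4, 8
hidden = torch.randn(batch, seq, heads * head_dim)
per_head = hidden.view(batch, seq, heads, head_dim).transpose(1, 2)   # (2, 4, 5, 8)
merged = per_head.permute(0, 2, 1, 3).contiguous().view(batch, seq, heads * head_dim)
print(torch.equal(merged, hidden))    # True: the round trip is lossless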
class_skeleton:
class Data2VecVisionSelfAttention(nn.Module):
def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional[torch.Tensor]=None, interpolate_pos_encoding: bool=False, resolution: Optional[tuple[int]]=None) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=26, AvgCountLineBlank=5, AvgCountLineCode=19, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.12, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=2, CountClassDerived=1, CountDeclInstanceMethod=3, CountDeclInstanceVariable=9, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=82, CountLineBlank=18, CountLineCode=57, CountLineCodeDecl=33, CountLineCodeExe=45, CountLineComment=7, CountStmt=43, CountStmtDecl=25, CountStmtExe=39, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=9

id: 1554
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
class_name: transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionSelfOutput
human_written_code:
from .configuration_data2vec_vision import Data2VecVisionConfig
import torch
from torch import nn
class Data2VecVisionSelfOutput(nn.Module):
"""
The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class_skeleton:
class Data2VecVisionSelfOutput(nn.Module):
'''
The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
'''
def __init__(self, config: Data2VecVisionConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=5, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.44, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=16, CountLineBlank=3, CountLineCode=9, CountLineCodeDecl=5, CountLineCodeExe=6, CountLineComment=4, CountStmt=9, CountStmtDecl=5, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 1555
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/data2vec/modeling_data2vec_vision.py
class_name: transformers.models.data2vec.modeling_data2vec_vision.Data2VecVisionUperHead
human_written_code:
import torch
from .configuration_data2vec_vision import Data2VecVisionConfig
from torch import nn
class Data2VecVisionUperHead(nn.Module):
"""
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
[UPerNet](https://huggingface.co/papers/1807.10221).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.pool_scales = config.pool_scales
self.in_channels = [config.hidden_size] * 4
self.channels = config.hidden_size
self.align_corners = False
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
self.psp_modules = Data2VecVisionPyramidPoolingModule(self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners)
self.bottleneck = Data2VecVisionConvModule(self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1)
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]:
l_conv = Data2VecVisionConvModule(in_channels, self.channels, kernel_size=1)
fpn_conv = Data2VecVisionConvModule(self.channels, self.channels, kernel_size=3, padding=1)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = Data2VecVisionConvModule(len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1)
def psp_forward(self, inputs):
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
output = self.bottleneck(psp_outs)
return output
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(encoder_hidden_states))
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners)
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = nn.functional.interpolate(fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners)
fpn_outs = torch.cat(fpn_outs, dim=1)
output = self.fpn_bottleneck(fpn_outs)
output = self.classifier(output)
return output
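
# Editor's sketch (illustrative, assumed shapes): the top-down pathway in the
# forward pass above upsamples each coarser feature map to the next finer
# resolution and adds it in before the per-level FPN convolutions.
import torch
from torch import nn

laterals = [torch.randn(1, 16, s, s) for s in (32, 16, 8, 4)]   # fine -> coarse
for i in range(len(laterals) - 1, 0, -1):
    laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
        laterals[i], size=laterals[i - 1].shape[2:], mode='bilinear', align_corners=False
    )
print([t.shape[-1] for t in laterals])   # [32, 16, 8, 4]: resolutions unchanged, content fused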
class_skeleton:
class Data2VecVisionUperHead(nn.Module):
'''
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
[UPerNet](https://huggingface.co/papers/1807.10221).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
'''
def __init__(self, config: Data2VecVisionConfig) -> None:
pass
def psp_forward(self, inputs):
pass
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=24, AvgCountLineBlank=3, AvgCountLineCode=19, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=0.24, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=10, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=83, CountLineBlank=13, CountLineCode=59, CountLineCodeDecl=25, CountLineCodeExe=55, CountLineComment=14, CountStmt=40, CountStmtDecl=25, CountStmtExe=36, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6

id: 1556
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/configuration_dbrx.py
class_name: transformers.models.dbrx.configuration_dbrx.DbrxAttentionConfig
human_written_code:
from typing import Any, Optional
from ...configuration_utils import PretrainedConfig
class DbrxAttentionConfig(PretrainedConfig):
"""Configuration class for Dbrx Attention.
[`DbrxAttention`] class. It is used to instantiate attention layers
according to the specified arguments, defining the layers architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
attn_pdrop (`float`, *optional*, defaults to 0.0):
The dropout probability for the attention layers.
clip_qkv (`float`, *optional*):
If set, clip the queries, keys, and values in the attention layer to this value.
        kv_n_heads (`int`, *optional*, defaults to 1): For grouped-query attention only, allows the user to specify the number of kv heads.
rope_theta (`float`, *optional*, defaults to 10000.0): The base frequency for rope.
"""
base_config_key = 'attn_config'
def __init__(self, attn_pdrop: float=0.0, clip_qkv: Optional[float]=None, kv_n_heads: int=1, rope_theta: float=10000.0, **kwargs: Any):
super().__init__(**kwargs)
self.attn_pdrop = attn_pdrop
self.clip_qkv = clip_qkv
self.kv_n_heads = kv_n_heads
self.rope_theta = rope_theta
for k in ['model_type', 'attn_implementation', 'transformers_version', '_commit_hash', 'torch_dtype', 'dtype']:
if k in kwargs:
kwargs.pop(k)
if len(kwargs) != 0:
raise ValueError(f'Found unknown kwargs={kwargs!r}')
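
# Editor's sketch (illustrative usage, not from the original file): the loop
# above strips serialization metadata and then rejects anything left over, so
# a made-up keyword (hypothetical `nonexistent_option`) raises.
config = DbrxAttentionConfig(attn_pdrop=0.1, kv_n_heads=8, rope_theta=500000.0)
print(config.kv_n_heads)   # 8
try:
    DbrxAttentionConfig(nonexistent_option=True)   # hypothetical bad kwarg
except ValueError as err:
    print(err)             # Found unknown kwargs={'nonexistent_option': True}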
class_skeleton:
class DbrxAttentionConfig(PretrainedConfig):
'''Configuration class for Dbrx Attention.
[`DbrxAttention`] class. It is used to instantiate attention layers
according to the specified arguments, defining the layers architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
attn_pdrop (`float`, *optional*, defaults to 0.0):
The dropout probability for the attention layers.
clip_qkv (`float`, *optional*):
If set, clip the queries, keys, and values in the attention layer to this value.
        kv_n_heads (`int`, *optional*, defaults to 1): For grouped-query attention only, allows the user to specify the number of kv heads.
rope_theta (`float`, *optional*, defaults to 10000.0): The base frequency for rope.
'''
def __init__(self, attn_pdrop: float=0.0, clip_qkv: Optional[float]=None, kv_n_heads: int=1, rope_theta: float=10000.0, **kwargs: Any):
pass
metrics: total_program_units=2, total_doc_str=1, AvgCountLine=19, AvgCountLineBlank=1, AvgCountLineCode=18, AvgCountLineComment=0, AvgCyclomatic=4, CommentToCodeRatio=0.65, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=4, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=39, CountLineBlank=6, CountLineCode=20, CountLineCodeDecl=15, CountLineCodeExe=11, CountLineComment=13, CountStmt=13, CountStmtDecl=8, CountStmtExe=11, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=4

id: 1557
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/configuration_dbrx.py
class_name: transformers.models.dbrx.configuration_dbrx.DbrxConfig
human_written_code:
from ...configuration_utils import PretrainedConfig
from typing import Any, Optional
class DbrxConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`DbrxModel`]. It is used to instantiate a Dbrx model according to the
specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a different configuration to that of the [databricks/dbrx-instruct](https://huggingface.co/databricks/dbrx-instruct) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
d_model (`int`, *optional*, defaults to 2048):
Dimensionality of the embeddings and hidden states.
n_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
n_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
max_seq_len (`int`, *optional*, defaults to 2048):
The maximum sequence length of the model.
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Dbrx model. Defines the maximum number of different tokens that can be represented by
the `inputs_ids` passed when calling [`DbrxModel`].
resid_pdrop (`float`, *optional*, defaults to 0.0):
The dropout probability applied to the attention output before combining with residual.
emb_pdrop (`float`, *optional*, defaults to 0.0):
The dropout probability for the embedding layer.
attn_config (`dict`, *optional*):
A dictionary used to configure the model's attention module.
ffn_config (`dict`, *optional*):
A dictionary used to configure the model's FFN module.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss. See [here]() for more details.
Example:
```python
>>> from transformers import DbrxConfig, DbrxModel
>>> # Initializing a Dbrx configuration
>>> configuration = DbrxConfig(n_layers=2, d_model=256, n_heads=8, vocab_size=128)
>>> # Initializing a model (with random weights) from the configuration
>>> model = DbrxModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'dbrx'
sub_configs = {'attn_config': DbrxAttentionConfig, 'ffn_config': DbrxFFNConfig}
attribute_map = {'num_attention_heads': 'n_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'n_layers', 'max_position_embeddings': 'max_seq_len'}
def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, max_seq_len: int=2048, vocab_size: int=32000, resid_pdrop: float=0.0, emb_pdrop: float=0.0, attn_config: Optional[DbrxAttentionConfig]=None, ffn_config: Optional[DbrxFFNConfig]=None, use_cache: bool=True, initializer_range: float=0.02, output_router_logits: bool=False, **kwargs: Any):
if attn_config is None:
self.attn_config = DbrxAttentionConfig()
elif isinstance(attn_config, dict):
self.attn_config = DbrxAttentionConfig(**attn_config)
else:
self.attn_config = attn_config
if ffn_config is None:
self.ffn_config = DbrxFFNConfig()
elif isinstance(ffn_config, dict):
self.ffn_config = DbrxFFNConfig(**ffn_config)
else:
self.ffn_config = ffn_config
self.d_model = d_model
self.n_heads = n_heads
self.n_layers = n_layers
self.max_seq_len = max_seq_len
self.vocab_size = vocab_size
self.resid_pdrop = resid_pdrop
self.emb_pdrop = emb_pdrop
self.use_cache = use_cache
self.initializer_range = initializer_range
self.output_router_logits = output_router_logits
self.num_key_value_heads = self.attn_config.kv_n_heads
tie_word_embeddings = kwargs.pop('tie_word_embeddings', False)
if tie_word_embeddings:
raise ValueError('tie_word_embeddings is not supported for DBRX models.')
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
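
# Editor's sketch (illustrative usage, not from the original file): plain dicts
# passed for the sub-configs are promoted to their typed classes by the
# branches above.
config = DbrxConfig(attn_config={'kv_n_heads': 8}, ffn_config={'moe_top_k': 4})
print(type(config.attn_config).__name__)   # DbrxAttentionConfig
print(config.num_key_value_heads)          # 8, mirrored from attn_config.kv_n_heads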
class_skeleton:
class DbrxConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`DbrxModel`]. It is used to instantiate a Dbrx model according to the
specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a different configuration to that of the [databricks/dbrx-instruct](https://huggingface.co/databricks/dbrx-instruct) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
d_model (`int`, *optional*, defaults to 2048):
Dimensionality of the embeddings and hidden states.
n_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
n_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
max_seq_len (`int`, *optional*, defaults to 2048):
The maximum sequence length of the model.
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Dbrx model. Defines the maximum number of different tokens that can be represented by
the `inputs_ids` passed when calling [`DbrxModel`].
resid_pdrop (`float`, *optional*, defaults to 0.0):
The dropout probability applied to the attention output before combining with residual.
emb_pdrop (`float`, *optional*, defaults to 0.0):
The dropout probability for the embedding layer.
attn_config (`dict`, *optional*):
A dictionary used to configure the model's attention module.
ffn_config (`dict`, *optional*):
A dictionary used to configure the model's FFN module.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss. See [here]() for more details.
Example:
```python
>>> from transformers import DbrxConfig, DbrxModel
>>> # Initializing a Dbrx configuration
>>> configuration = DbrxConfig(n_layers=2, d_model=256, n_heads=8, vocab_size=128)
>>> # Initializing a model (with random weights) from the configuration
>>> model = DbrxModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, max_seq_len: int=2048, vocab_size: int=32000, resid_pdrop: float=0.0, emb_pdrop: float=0.0, attn_config: Optional[DbrxAttentionConfig]=None, ffn_config: Optional[DbrxFFNConfig]=None, use_cache: bool=True, initializer_range: float=0.02, output_router_logits: bool=False, **kwargs: Any):
pass
metrics: total_program_units=2, total_doc_str=1, AvgCountLine=47, AvgCountLineBlank=4, AvgCountLineCode=43, AvgCountLineComment=0, AvgCyclomatic=6, CommentToCodeRatio=0.85, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=13, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=111, CountLineBlank=15, CountLineCode=52, CountLineCodeDecl=34, CountLineCodeExe=35, CountLineComment=44, CountStmt=28, CountStmtDecl=19, CountStmtExe=26, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6

id: 1558
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/configuration_dbrx.py
class_name: transformers.models.dbrx.configuration_dbrx.DbrxFFNConfig
human_written_code:
from typing import Any, Optional
from ...configuration_utils import PretrainedConfig
class DbrxFFNConfig(PretrainedConfig):
"""Configuration class for Dbrx FFN.
[`DbrxFFN`] class. It is used to instantiate feedforward layers according to
the specified arguments, defining the layers architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
ffn_act_fn (`dict`, *optional*, defaults to `None`): A dict specifying activation function for the FFN.
The dict should have a key 'name' with the value being the name of the activation function along with
any additional keyword arguments. If `None`, then set to `{"name": "silu"}`.
ffn_hidden_size (`int`, *optional*, defaults to 3584): The hidden size of the feedforward network.
moe_num_experts (`int`, *optional*, defaults to 4): The number of experts in the mixture of experts layer.
moe_top_k (`int`, *optional*, defaults to 1): The number of experts to use in the mixture of experts layer.
moe_jitter_eps (`float`, *optional*, defaults to `None`): If not `None`, the jitter epsilon for the mixture of experts layer.
moe_loss_weight (`float`, *optional*, defaults to 0.01): The loss weight for the mixture of experts layer.
moe_normalize_expert_weights (`float`, *optional*, defaults to 1.0): The normalization factor for the expert weights.
"""
base_config_key = 'ffn_config'
def __init__(self, ffn_act_fn: Optional[dict]=None, ffn_hidden_size: int=3584, moe_num_experts: int=4, moe_top_k: int=1, moe_jitter_eps: Optional[float]=None, moe_loss_weight: float=0.01, moe_normalize_expert_weights: Optional[float]=1.0, **kwargs: Any):
super().__init__()
if ffn_act_fn is None:
ffn_act_fn = {'name': 'silu'}
self.ffn_act_fn = ffn_act_fn
self.ffn_hidden_size = ffn_hidden_size
self.moe_num_experts = moe_num_experts
self.moe_top_k = moe_top_k
self.moe_jitter_eps = moe_jitter_eps
self.moe_loss_weight = moe_loss_weight
self.moe_normalize_expert_weights = moe_normalize_expert_weights
for k in ['model_type', 'attn_implementation', 'transformers_version', '_commit_hash', 'torch_dtype', 'dtype']:
if k in kwargs:
kwargs.pop(k)
if len(kwargs) != 0:
raise ValueError(f'Found unknown kwargs={kwargs!r}')
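
# Editor's sketch (illustrative usage, not from the original file): when no
# activation spec is passed, the constructor above falls back to SiLU.
config = DbrxFFNConfig(moe_num_experts=16, moe_top_k=4)
print(config.ffn_act_fn)   # {'name': 'silu'}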
class_skeleton:
class DbrxFFNConfig(PretrainedConfig):
'''Configuration class for Dbrx FFN.
[`DbrxFFN`] class. It is used to instantiate feedforward layers according to
the specified arguments, defining the layers architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
ffn_act_fn (`dict`, *optional*, defaults to `None`): A dict specifying activation function for the FFN.
The dict should have a key 'name' with the value being the name of the activation function along with
any additional keyword arguments. If `None`, then set to `{"name": "silu"}`.
ffn_hidden_size (`int`, *optional*, defaults to 3584): The hidden size of the feedforward network.
moe_num_experts (`int`, *optional*, defaults to 4): The number of experts in the mixture of experts layer.
moe_top_k (`int`, *optional*, defaults to 1): The number of experts to use in the mixture of experts layer.
moe_jitter_eps (`float`, *optional*, defaults to `None`): If not `None`, the jitter epsilon for the mixture of experts layer.
moe_loss_weight (`float`, *optional*, defaults to 0.01): The loss weight for the mixture of experts layer.
moe_normalize_expert_weights (`float`, *optional*, defaults to 1.0): The normalization factor for the expert weights.
'''
def __init__(self, ffn_act_fn: Optional[dict]=None, ffn_hidden_size: int=3584, moe_num_experts: int=4, moe_top_k: int=1, moe_jitter_eps: Optional[float]=None, moe_loss_weight: float=0.01, moe_normalize_expert_weights: Optional[float]=1.0, **kwargs: Any):
pass
metrics: total_program_units=2, total_doc_str=1, AvgCountLine=27, AvgCountLineBlank=1, AvgCountLineCode=26, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0.57, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=7, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=50, CountLineBlank=6, CountLineCode=28, CountLineCodeDecl=21, CountLineCodeExe=16, CountLineComment=16, CountStmt=18, CountStmtDecl=11, CountStmtExe=16, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=5

id: 1559
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py
class_name: transformers.models.dbrx.modeling_dbrx.DbrxAttention
human_written_code:
from torch import nn
import torch
from ...cache_utils import Cache, DynamicCache, StaticCache
import math
from ...utils.deprecation import deprecate_kwarg
from .configuration_dbrx import DbrxConfig
from typing import Any, Optional, Union
class DbrxAttention(nn.Module):
"""Multi-head self attention."""
def __init__(self, config: DbrxConfig, block_idx: Optional[int]=None):
super().__init__()
self.config = config
self.hidden_size = config.d_model
self.num_heads = config.n_heads
self.head_dim = self.hidden_size // self.num_heads
self.max_position_embeddings = config.max_seq_len
self.block_idx = block_idx
if block_idx is None:
logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `block_idx` is not recommended and will ' + 'lead to errors during the forward call if caching is used. Please make sure to provide a `block_idx` ' + 'when creating this class.')
attn_config = config.attn_config
self.attn_pdrop = attn_config.attn_pdrop
self.clip_qkv = attn_config.clip_qkv
self.num_key_value_heads = attn_config.kv_n_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.rope_theta = attn_config.rope_theta
self.is_causal = True
self.Wqkv = nn.Linear(self.hidden_size, self.hidden_size + 2 * self.num_key_value_heads * self.head_dim, bias=False)
self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.rotary_emb = DbrxRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
bsz, q_len, _ = hidden_states.size()
qkv_states = self.Wqkv(hidden_states)
        if self.clip_qkv is not None:
            qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query_states, key_states, value_states = qkv_states.split([self.hidden_size, self.num_key_value_heads * self.head_dim, self.num_key_value_heads * self.head_dim], dim=2)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.block_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attn_pdrop, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is' + f' {attn_output.size()}')
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
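
# Editor's sketch (illustrative, toy sizes assumed): grouped-query attention as
# used above shares each kv head across a group of query heads; `repeat_kv` is
# equivalent to the repeat_interleave shown here.
import torch

num_heads, kv_heads, head_dim, seq = 8, 2, 4, 5
kv = torch.randn(1, kv_heads, seq, head_dim)
expanded = kv.repeat_interleave(num_heads // kv_heads, dim=1)
print(expanded.shape)   # torch.Size([1, 8, 5, 4]): each kv head now serves 4 query heads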
class_skeleton:
class DbrxAttention(nn.Module):
'''Multi-head self attention.'''
def __init__(self, config: DbrxConfig, block_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=50, AvgCountLineBlank=8, AvgCountLineCode=41, AvgCountLineComment=2, AvgCyclomatic=4, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=3, CountClassDerived=2, CountDeclInstanceMethod=2, CountDeclInstanceVariable=15, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=103, CountLineBlank=17, CountLineCode=83, CountLineCodeDecl=39, CountLineCodeExe=70, CountLineComment=4, CountStmt=53, CountStmtDecl=29, CountStmtExe=50, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=8

id: 1560
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py
class_name: transformers.models.dbrx.modeling_dbrx.DbrxBlock
human_written_code:
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...modeling_layers import GradientCheckpointingLayer
import torch
from typing import Any, Optional, Union
from torch import nn
from .configuration_dbrx import DbrxConfig
from ...utils.deprecation import deprecate_kwarg
class DbrxBlock(GradientCheckpointingLayer):
def __init__(self, config: DbrxConfig, block_idx: int):
super().__init__()
self.hidden_size = config.d_model
self.resid_pdrop = config.resid_pdrop
self.block_idx = block_idx
self.norm_attn_norm = DbrxNormAttentionNorm(config=config, block_idx=block_idx)
self.ffn = DbrxFFN(config=config)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, Optional[torch.Tensor]], tuple[torch.Tensor, Optional[Cache]], tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]], tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]], tuple[torch.Tensor, Optional[Cache], Optional[torch.Tensor]], tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache], Optional[torch.Tensor]]]:
"""Forward function for DbrxBlock.
Args:
hidden_states (`torch.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
position_ids (`torch.LongTensor`): position ids of shape `(batch, seq_len)`
attention_mask (`torch.Tensor`, *optional*): attention mask of size (batch_size, sequence_length)
if flash attention is used or (batch_size, 1, query_sequence_length, key_sequence_length)
if default attention is used.
past_key_values (`Cache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all
attention layers. See `attentions` under returned tensors for more detail.
output_router_logits (`bool`, *optional*): Whether or not to return the router logits.
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are
returned and can be used to speed up decoding (see `past_key_values`).
cache_position (`torch.LongTensor`, *optional*): position ids of the cache
"""
resid_states, hidden_states, self_attn_weights = self.norm_attn_norm(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states, router_logits = self.ffn(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
hidden_states = resid_states + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if output_router_logits:
outputs += (router_logits,)
return outputs
class_skeleton:
class DbrxBlock(GradientCheckpointingLayer):
def __init__(self, config: DbrxConfig, block_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, Optional[torch.Tensor]], tuple[torch.Tensor, Optional[Cache]], tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]], tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]], tuple[torch.Tensor, Optional[Cache], Optional[torch.Tensor]], tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache], Optional[torch.Tensor]]]:
'''Forward function for DbrxBlock.
Args:
hidden_states (`torch.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
position_ids (`torch.LongTensor`): position ids of shape `(batch, seq_len)`
attention_mask (`torch.Tensor`, *optional*): attention mask of size (batch_size, sequence_length)
if flash attention is used or (batch_size, 1, query_sequence_length, key_sequence_length)
if default attention is used.
past_key_values (`Cache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all
attention layers. See `attentions` under returned tensors for more detail.
output_router_logits (`bool`, *optional*): Whether or not to return the router logits.
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are
returned and can be used to speed up decoding (see `past_key_values`).
cache_position (`torch.LongTensor`, *optional*): position ids of the cache
'''
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=38, AvgCountLineBlank=4, AvgCountLineCode=26, AvgCountLineComment=9, AvgCyclomatic=3, CommentToCodeRatio=0.33, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=78, CountLineBlank=9, CountLineCode=52, CountLineCodeDecl=30, CountLineCodeExe=30, CountLineComment=17, CountStmt=21, CountStmtDecl=11, CountStmtExe=18, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=5

id: 1561
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py
class_name: transformers.models.dbrx.modeling_dbrx.DbrxExpertGLU
human_written_code:
from ...activations import ACT2FN
import torch
from torch import nn
class DbrxExpertGLU(nn.Module):
def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
super().__init__()
self.hidden_size = hidden_size
self.ffn_hidden_size = ffn_hidden_size
self.moe_num_experts = moe_num_experts
self.w1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
self.v1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
self.w2 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
act_fn_name = ffn_act_fn.get('name', 'silu')
self.activation_fn = ACT2FN[act_fn_name]
def forward(self, x: torch.Tensor, expert_w1: torch.Tensor, expert_v1: torch.Tensor, expert_w2: torch.Tensor) -> torch.Tensor:
gate_proj = x.matmul(expert_w1.t())
up_proj = x.matmul(expert_v1.t())
gate_proj = self.activation_fn(gate_proj)
intermediate_states = gate_proj * up_proj
down_proj = intermediate_states.matmul(expert_w2)
return down_proj
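
# Editor's sketch (illustrative, toy sizes assumed): the gated-linear-unit math
# above reduces to silu(x @ w1.T) * (x @ v1.T), projected back through w2.
import torch

hidden, ffn = 4, 6
x = torch.randn(3, hidden)
w1, v1, w2 = (torch.randn(ffn, hidden) for _ in range(3))
out = (torch.nn.functional.silu(x @ w1.t()) * (x @ v1.t())) @ w2
print(out.shape)   # torch.Size([3, 4]): back to the hidden size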
class_skeleton:
class DbrxExpertGLU(nn.Module):
def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
pass
def forward(self, x: torch.Tensor, expert_w1: torch.Tensor, expert_v1: torch.Tensor, expert_w2: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=7, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=23, CountLineBlank=3, CountLineCode=20, CountLineCodeDecl=17, CountLineCodeExe=15, CountLineComment=0, CountStmt=18, CountStmtDecl=15, CountStmtExe=15, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 1562
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py
class_name: transformers.models.dbrx.modeling_dbrx.DbrxExperts
human_written_code:
import torch
from torch import nn
class DbrxExperts(nn.Module):
def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
super().__init__()
self.moe_num_experts = moe_num_experts
self.mlp = DbrxExpertGLU(hidden_size=hidden_size, ffn_hidden_size=ffn_hidden_size, moe_num_experts=moe_num_experts, ffn_act_fn=ffn_act_fn)
def forward(self, x: torch.Tensor, weights: torch.Tensor, top_weights: torch.Tensor, top_experts: torch.LongTensor) -> torch.Tensor:
bsz, q_len, hidden_size = x.shape
x = x.view(-1, hidden_size)
out = torch.zeros_like(x)
expert_mask = nn.functional.one_hot(top_experts, num_classes=self.moe_num_experts).permute(2, 1, 0)
w1_chunked = self.mlp.w1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(self.moe_num_experts, dim=0)
v1_chunked = self.mlp.v1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(self.moe_num_experts, dim=0)
w2_chunked = self.mlp.w2.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(self.moe_num_experts, dim=0)
w1_chunked = [w1.squeeze(dim=0) for w1 in w1_chunked]
v1_chunked = [v1.squeeze(dim=0) for v1 in v1_chunked]
w2_chunked = [w2.squeeze(dim=0) for w2 in w2_chunked]
for expert_idx in range(0, self.moe_num_experts):
topk_idx, token_idx = torch.where(expert_mask[expert_idx])
if token_idx.shape[0] == 0:
continue
            expert_tokens = x[None, token_idx].reshape(-1, hidden_size)
            expert_out = self.mlp(expert_tokens, w1_chunked[expert_idx], v1_chunked[expert_idx], w2_chunked[expert_idx]) * top_weights[token_idx, topk_idx, None]
out.index_add_(0, token_idx, expert_out)
out = out.reshape(bsz, q_len, hidden_size)
return out
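
# Editor's sketch (illustrative, assumed sizes): the dispatch above one-hot
# encodes the routed experts, then per expert recovers which tokens (and which
# top-k slot) selected it via torch.where.
import torch
from torch import nn

num_experts, top_k, tokens = 4, 2, 5
top_experts = torch.randint(0, num_experts, (tokens, top_k))
expert_mask = nn.functional.one_hot(top_experts, num_classes=num_experts).permute(2, 1, 0)
for expert_idx in range(num_experts):
    topk_idx, token_idx = torch.where(expert_mask[expert_idx])
    print(expert_idx, token_idx.tolist())   # tokens routed to this expert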
class_skeleton:
class DbrxExperts(nn.Module):
def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
pass
def forward(self, x: torch.Tensor, weights: torch.Tensor, top_weights: torch.Tensor, top_experts: torch.LongTensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=25, AvgCountLineBlank=3, AvgCountLineCode=21, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=52, CountLineBlank=6, CountLineCode=43, CountLineCodeDecl=19, CountLineCodeExe=38, CountLineComment=3, CountStmt=27, CountStmtDecl=17, CountStmtExe=24, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=4

id: 1563
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py
class_name: transformers.models.dbrx.modeling_dbrx.DbrxFFN
human_written_code:
import torch
from .configuration_dbrx import DbrxConfig
from torch import nn
class DbrxFFN(nn.Module):
def __init__(self, config: DbrxConfig):
super().__init__()
ffn_config = config.ffn_config
self.router = DbrxRouter(hidden_size=config.d_model, moe_num_experts=ffn_config.moe_num_experts, moe_top_k=ffn_config.moe_top_k, moe_jitter_eps=ffn_config.moe_jitter_eps, moe_normalize_expert_weights=ffn_config.moe_normalize_expert_weights)
self.experts = DbrxExperts(hidden_size=config.d_model, ffn_hidden_size=ffn_config.ffn_hidden_size, moe_num_experts=ffn_config.moe_num_experts, ffn_act_fn=ffn_config.ffn_act_fn)
def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
weights, top_weights, top_experts = self.router(x)
out = self.experts(x, weights, top_weights, top_experts)
return (out, weights)
class_skeleton:
class DbrxFFN(nn.Module):
def __init__(self, config: DbrxConfig):
pass
def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=24, CountLineBlank=3, CountLineCode=21, CountLineCodeDecl=8, CountLineCodeExe=18, CountLineComment=0, CountStmt=10, CountStmtDecl=8, CountStmtExe=7, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 1564
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py
class_name: transformers.models.dbrx.modeling_dbrx.DbrxFlashAttention2
human_written_code:
from ...utils.deprecation import deprecate_kwarg
from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available
from ...cache_utils import Cache, DynamicCache, StaticCache
from typing import Any, Optional, Union
import torch
class DbrxFlashAttention2(DbrxAttention):
"""Dbrx flash attention module.
    This module inherits from `DbrxAttention`, as the weights of the module stay
    untouched. The only required change is in the forward pass, where it
calls the public API of flash attention.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if isinstance(past_key_values, StaticCache):
raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers')
logger.info('Implicitly setting `output_attentions` to False as it is not supported in Flash Attention.')
output_attentions = False
bsz, q_len, _ = hidden_states.size()
qkv_states = self.Wqkv(hidden_states)
if self.clip_qkv is not None:
qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query_states, key_states, value_states = qkv_states.split([self.hidden_size, self.num_key_value_heads * self.head_dim, self.num_key_value_heads * self.head_dim], dim=2)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.block_idx, cache_kwargs)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attn_pdrop if self.training else 0.0
input_dtype = query_states.dtype
device_type = query_states.device.type if query_states.device.type != 'mps' else 'cpu'
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_dtype(device_type) if hasattr(torch, 'get_autocast_dtype') else torch.get_autocast_gpu_dtype()
elif hasattr(self.config, '_pre_quantization_dtype'):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = query_states.dtype
logger.warning_once('The input hidden states seems to be silently casted in float32, this might be ' + 'related to the fact you have upcasted embedding or layer norm layers in ' + f'float32. We will cast back the input in {target_dtype}.')
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
class_skeleton:
class DbrxFlashAttention2(DbrxAttention):
'''Dbrx flash attention module.
    This module inherits from `DbrxAttention`, as the weights of the module stay
    untouched. The only required change is in the forward pass, where it
calls the public API of flash attention.
'''
def __init__(self, *args, **kwargs):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=55, AvgCountLineBlank=8, AvgCountLineCode=39, AvgCountLineComment=8, AvgCyclomatic=5, CommentToCodeRatio=0.27, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=14, CountLine=118, CountLineBlank=19, CountLineCode=78, CountLineCodeDecl=28, CountLineCodeExe=65, CountLineComment=21, CountStmt=42, CountStmtDecl=14, CountStmtExe=39, MaxCyclomatic=9, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=10

id: 1565
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py
class_name: transformers.models.dbrx.modeling_dbrx.DbrxForCausalLM
human_written_code:
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from torch import nn
from ...cache_utils import Cache, DynamicCache, StaticCache
from .configuration_dbrx import DbrxConfig
import torch
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...generation import GenerationMixin
from typing import Any, Optional, Union
@auto_docstring(custom_intro='\n The DBRX Model transformer for causal language modeling.\n ')
class DbrxForCausalLM(DbrxPreTrainedModel, GenerationMixin):
def __init__(self, config: DbrxConfig):
super().__init__(config)
self.transformer = DbrxModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.moe_loss_weight = config.ffn_config.moe_loss_weight
self.num_experts = config.ffn_config.moe_num_experts
self.num_experts_per_tok = config.ffn_config.moe_top_k
self.post_init()
def get_input_embeddings(self) -> nn.Embedding:
return self.transformer.get_input_embeddings()
def set_input_embeddings(self, value: nn.Embedding):
self.transformer.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Linear:
return self.lm_head
def set_output_embeddings(self, new_embeddings: nn.Linear):
self.lm_head = new_embeddings
def set_decoder(self, decoder: DbrxModel):
self.transformer = decoder
def get_decoder(self) -> DbrxModel:
return self.transformer
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, MoeCausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>> from transformers import AutoTokenizer, DbrxForCausalLM
>> model = DbrxForCausalLM.from_pretrained("databricks/dbrx-instruct")
>> tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct")
>> prompt = "Hey, are you conscious? Can you talk to me?"
>> inputs = tokenizer(prompt, return_tensors="pt")
>> # Generate
>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok, attention_mask)
if labels is not None and loss is not None:
loss += self.moe_loss_weight * aux_loss.to(loss.device)
if not return_dict:
output = (logits,) + outputs[1:]
if output_router_logits:
output = (aux_loss,) + output
return (loss,) + output if loss is not None else output
return MoeCausalLMOutputWithPast(loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits)
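
# Editor's sketch (illustrative): the `logits_to_keep` slicing above computes
# the lm_head projection only for the last n positions; 0 keeps everything.
import torch

hidden_states = torch.randn(2, 7, 16)
slice_indices = slice(-1, None)                   # logits_to_keep == 1
print(hidden_states[:, slice_indices, :].shape)   # torch.Size([2, 1, 16])
slice_indices = slice(-0, None)                   # logits_to_keep == 0 -> slice(0, None)
print(hidden_states[:, slice_indices, :].shape)   # torch.Size([2, 7, 16]): full sequence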
class_skeleton:
@auto_docstring(custom_intro='\n The DBRX Model transformer for causal language modeling.\n ')
class DbrxForCausalLM(DbrxPreTrainedModel, GenerationMixin):
def __init__(self, config: DbrxConfig):
pass
def get_input_embeddings(self) -> nn.Embedding:
pass
def set_input_embeddings(self, value: nn.Embedding):
pass
def get_output_embeddings(self) -> nn.Linear:
pass
def set_output_embeddings(self, new_embeddings: nn.Linear):
pass
def set_decoder(self, decoder: DbrxModel):
pass
def get_decoder(self) -> DbrxModel:
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, MoeCausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>> from transformers import AutoTokenizer, DbrxForCausalLM
>> model = DbrxForCausalLM.from_pretrained("databricks/dbrx-instruct")
>> tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct")
>> prompt = "Hey, are you conscious? Can you talk to me?"
>> inputs = tokenizer(prompt, return_tensors="pt")
>> # Generate
>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```
'''
pass
metrics: total_program_units=11, total_doc_str=1, AvgCountLine=17, AvgCountLineBlank=2, AvgCountLineCode=12, AvgCountLineComment=4, AvgCyclomatic=3, CommentToCodeRatio=0.31, CountClassBase=2, CountClassCoupled=9, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=8, CountDeclInstanceVariable=6, CountDeclMethod=8, CountDeclMethodAll=9, CountLine=149, CountLineBlank=22, CountLineCode=98, CountLineCodeDecl=39, CountLineCodeExe=70, CountLineComment=30, CountStmt=45, CountStmtDecl=22, CountStmtExe=36, MaxCyclomatic=13, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=20

id: 1566
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py
class_name: transformers.models.dbrx.modeling_dbrx.DbrxModel
human_written_code:
from typing import Any, Optional, Union
from ...modeling_attn_mask_utils import AttentionMaskConverter
import torch
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from torch import nn
from ...cache_utils import Cache, DynamicCache, StaticCache
from .configuration_dbrx import DbrxConfig
@auto_docstring
class DbrxModel(DbrxPreTrainedModel):
"""Transformer decoder consisting of *config.num_hidden_layers*. Each layer is a [`DbrxBlock`] layer.
Args:
config ([`DbrxConfig`]): Model configuration class with all parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
def __init__(self, config: DbrxConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.emb_pdrop = config.emb_pdrop
self.wte = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.blocks = nn.ModuleList([DbrxBlock(config, block_idx) for block_idx in range(config.n_layers)])
self.norm_f = nn.LayerNorm(config.d_model, bias=False)
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self) -> nn.Embedding:
return self.wte
def set_input_embeddings(self, value: nn.Embedding):
self.wte = value
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Union[tuple, MoeModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
inputs_embeds = nn.functional.dropout(inputs_embeds, p=self.emb_pdrop, training=self.training)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
hidden_states = inputs_embeds
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_router_logits = () if output_router_logits else None
for block in self.blocks:
if output_hidden_states:
all_hidden_states += (hidden_states,)
block_outputs = block(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, output_router_logits=output_router_logits, use_cache=use_cache, cache_position=cache_position)
hidden_states = block_outputs[0]
if output_attentions:
all_self_attns += (block_outputs[1],)
if output_router_logits:
all_router_logits += (block_outputs[-1],)
hidden_states = self.norm_f(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_router_logits] if v is not None))
return MoeModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, router_logits=all_router_logits)
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache, to
account for the zero padding (the part of the cache that is not yet filled).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
class_skeleton:
@auto_docstring
class DbrxModel(DbrxPreTrainedModel):
'''Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DbrxBlock`].
Args:
config ([`DbrxConfig`]): Model configuration class with all parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
def __init__(self, config: DbrxConfig):
pass
def get_input_embeddings(self) -> nn.Embedding:
pass
def set_input_embeddings(self, value: nn.Embedding):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Union[tuple, MoeModelOutputWithPast]:
pass
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache, to
account for the zero padding (the part of the cache that is not yet filled).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
'''
pass
total_program_units: 10 | total_doc_str: 2 | AvgCountLine: 45 | AvgCountLineBlank: 5 | AvgCountLineCode: 35 | AvgCountLineComment: 6 | AvgCyclomatic: 7 | CommentToCodeRatio: 0.22 | CountClassBase: 1 | CountClassCoupled: 14 | CountClassCoupledModified: 7 | CountClassDerived: 0 | CountDeclInstanceMethod: 5 | CountDeclInstanceVariable: 7 | CountDeclMethod: 6 | CountDeclMethodAll: 7 | CountLine: 290 | CountLineBlank: 37 | CountLineCode: 210 | CountLineCodeDecl: 68 | CountLineCodeExe: 171 | CountLineComment: 46 | CountStmt: 105 | CountStmtDecl: 36 | CountStmtExe: 98 | MaxCyclomatic: 28 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 44
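The `_prepare_4d_causal_attention_mask_with_cache_position` helper in the DbrxModel record above is easiest to follow on a toy input. Below is a minimal standalone sketch of the same masking logic (a hand-copied re-derivation, not an import from transformers); the `toy_causal_mask` name and the tiny shapes are illustrative assumptions.

```python
# Standalone sketch of the 4D causal-mask construction above (assumed faithful, not imported).
import torch

def toy_causal_mask(attention_mask: torch.Tensor, sequence_length: int, target_length: int) -> torch.Tensor:
    dtype = torch.float32
    min_dtype = torch.finfo(dtype).min
    cache_position = torch.arange(sequence_length)  # prefill case: no past tokens
    mask = torch.full((sequence_length, target_length), min_dtype, dtype=dtype)
    if sequence_length != 1:
        mask = torch.triu(mask, diagonal=1)  # forbid attending to future positions
    mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
    mask = mask[None, None, :, :].expand(attention_mask.shape[0], 1, -1, -1).clone()
    # Fold the 2D padding mask into the causal mask, as the helper does.
    mask_length = attention_mask.shape[-1]
    padding = (mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]) == 0
    mask[:, :, :, :mask_length] = mask[:, :, :, :mask_length].masked_fill(padding, min_dtype)
    return mask

print(toy_causal_mask(torch.tensor([[1, 1, 0]]), sequence_length=3, target_length=3)[0, 0])
# Row i has 0.0 (attendable) at non-padding columns <= i, and dtype-min everywhere else.
```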
id: 1,567 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py | class_name: transformers.models.dbrx.modeling_dbrx.DbrxNormAttentionNorm
human_written_code:
from .configuration_dbrx import DbrxConfig
import torch
from typing import Any, Optional, Union
from torch import nn
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, StaticCache
class DbrxNormAttentionNorm(nn.Module):
def __init__(self, config: DbrxConfig, block_idx: Optional[int]=None):
super().__init__()
self.block_idx = block_idx
self.resid_pdrop = config.resid_pdrop
self.norm_1 = nn.LayerNorm(config.d_model, bias=False)
self.attn = DBRX_ATTENTION_CLASSES[config._attn_implementation](config=config, block_idx=block_idx)
self.norm_2 = nn.LayerNorm(config.d_model, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
residual_states = hidden_states
hidden_states = self.norm_1(hidden_states).to(hidden_states.dtype)
hidden_states, attn_weights = self.attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
hidden_states = hidden_states + residual_states
residual_states = hidden_states
hidden_states = self.norm_2(hidden_states).to(hidden_states.dtype)
return (residual_states, hidden_states, attn_weights)
class_skeleton:
class DbrxNormAttentionNorm(nn.Module):
def __init__(self, config: DbrxConfig, block_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
pass
total_program_units: 4 | total_doc_str: 0 | AvgCountLine: 21 | AvgCountLineBlank: 2 | AvgCountLineCode: 19 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 7 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 5 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 44 | CountLineBlank: 5 | CountLineCode: 39 | CountLineCodeDecl: 20 | CountLineCodeExe: 26 | CountLineComment: 0 | CountStmt: 17 | CountStmtDecl: 10 | CountStmtExe: 14 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2
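As a reading aid for the record above, here is a hedged sketch of the norm -> attention -> dropout -> residual -> norm dataflow that `DbrxNormAttentionNorm.forward` implements; the identity `attn` stand-in and the tiny shapes are assumptions made so the snippet is self-contained.

```python
# Dataflow sketch only: `attn` is an identity stand-in for the real DbrxAttention.
import torch
from torch import nn

d_model = 8
norm_1 = nn.LayerNorm(d_model, bias=False)  # bias=False mirrors the block; needs PyTorch >= 2.1
norm_2 = nn.LayerNorm(d_model, bias=False)
attn = lambda x: x

hidden = torch.randn(2, 4, d_model)
residual = hidden
hidden = norm_1(hidden)
hidden = attn(hidden)
hidden = nn.functional.dropout(hidden, p=0.0)
hidden = hidden + residual
# The block returns both the updated residual stream and its normed view,
# so the expert layer that follows can consume the normed tensor and add back onto the residual.
residual_out, normed = hidden, norm_2(hidden)
print(residual_out.shape, normed.shape)
```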
id: 1,568 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py | class_name: transformers.models.dbrx.modeling_dbrx.DbrxPreTrainedModel
human_written_code:
from .configuration_dbrx import DbrxConfig
from ...modeling_utils import PreTrainedModel
from torch import nn
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
@auto_docstring
class DbrxPreTrainedModel(PreTrainedModel):
config: DbrxConfig
base_model_prefix = 'transformer'
supports_gradient_checkpointing = True
_no_split_modules = ['DbrxBlock']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = False
def _init_weights(self, module: nn.Module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, DbrxExpertGLU):
module.w1.data.normal_(mean=0.0, std=std)
module.v1.data.normal_(mean=0.0, std=std)
module.w2.data.normal_(mean=0.0, std=std)
class_skeleton:
@auto_docstring
class DbrxPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 18 | AvgCountLineBlank: 0 | AvgCountLineCode: 18 | AvgCountLineComment: 0 | AvgCyclomatic: 8 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 1 | CountClassDerived: 2 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 0 | CountDeclMethod: 1 | CountDeclMethodAll: 1 | CountLine: 30 | CountLineBlank: 1 | CountLineCode: 29 | CountLineCodeDecl: 13 | CountLineCodeExe: 27 | CountLineComment: 1 | CountStmt: 26 | CountStmtDecl: 13 | CountStmtExe: 24 | MaxCyclomatic: 8 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 8
id: 1,569 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py | class_name: transformers.models.dbrx.modeling_dbrx.DbrxRotaryEmbedding
human_written_code:
from torch import nn
import torch
class DbrxRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim)
self.register_buffer('inv_freq', tensor=inv_freq, persistent=False)
@torch.no_grad()
def forward(self, x, position_ids, seq_len=None):
inv_freq = self.inv_freq.to(x.device)  # Tensor.to() is not in-place; bind the moved tensor
inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type
device_type = device_type if device_type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos()
sin = emb.sin()
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
class_skeleton:
class DbrxRotaryEmbedding(nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
pass
@torch.no_grad()
def forward(self, x, position_ids, seq_len=None):
pass
total_program_units: 4 | total_doc_str: 0 | AvgCountLine: 12 | AvgCountLineBlank: 1 | AvgCountLineCode: 10 | AvgCountLineComment: 2 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.14 | CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 27 | CountLineBlank: 3 | CountLineCode: 21 | CountLineCodeDecl: 15 | CountLineCodeExe: 17 | CountLineComment: 3 | CountStmt: 20 | CountStmtDecl: 14 | CountStmtExe: 17 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 3
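A short usage sketch for the rotary embedding above (assuming the `DbrxRotaryEmbedding` class from this record is in scope); the shapes are illustrative only.

```python
import torch

rope = DbrxRotaryEmbedding(dim=64)               # per-head dimension, not hidden_size
x = torch.randn(2, 5, 64)                        # dummy activations; only dtype/device are read from x
position_ids = torch.arange(5).expand(2, -1)     # (batch, seq)
cos, sin = rope(x, position_ids)
print(cos.shape, sin.shape)                      # torch.Size([2, 5, 64]) for both
```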
id: 1,570 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py | class_name: transformers.models.dbrx.modeling_dbrx.DbrxRouter
human_written_code:
import torch
from typing import Any, Optional, Union
from torch import nn
class DbrxRouter(nn.Module):
def __init__(self, hidden_size: int, moe_num_experts: int, moe_top_k: int, moe_jitter_eps: Optional[float], moe_normalize_expert_weights: Optional[float]):
super().__init__()
self.hidden_size = hidden_size
self.moe_num_experts = moe_num_experts
self.moe_top_k = moe_top_k
self.moe_jitter_eps = moe_jitter_eps
self.moe_normalize_expert_weights = moe_normalize_expert_weights
self.layer = nn.Linear(self.hidden_size, self.moe_num_experts, bias=False)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.LongTensor]:
if self.training and self.moe_jitter_eps is not None:
hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.moe_jitter_eps, 1.0 + self.moe_jitter_eps)
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
weights = self.layer(hidden_states).softmax(dim=-1, dtype=torch.float32)
top_weights, top_experts = torch.topk(weights, self.moe_top_k, dim=-1)
top_weights_scale = torch.norm(top_weights, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True) if self.moe_normalize_expert_weights is not None else 1.0
top_weights = top_weights / top_weights_scale
weights = weights.to(hidden_states.dtype)
top_weights = top_weights.to(hidden_states.dtype)
return (weights, top_weights, top_experts)
class_skeleton:
class DbrxRouter(nn.Module):
def __init__(self, hidden_size: int, moe_num_experts: int, moe_top_k: int, moe_jitter_eps: Optional[float], moe_normalize_expert_weights: Optional[float]):
pass
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.LongTensor]:
pass
total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 18 | AvgCountLineBlank: 2 | AvgCountLineCode: 16 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 6 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 37 | CountLineBlank: 4 | CountLineCode: 33 | CountLineCodeDecl: 19 | CountLineCodeExe: 23 | CountLineComment: 0 | CountStmt: 20 | CountStmtDecl: 12 | CountStmtExe: 17 | MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
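A quick shape sketch of the top-k routing above (assuming `DbrxRouter` from this record is defined); the tiny sizes are arbitrary.

```python
import torch

torch.manual_seed(0)
router = DbrxRouter(hidden_size=16, moe_num_experts=4, moe_top_k=2,
                    moe_jitter_eps=None, moe_normalize_expert_weights=1.0)
hidden = torch.randn(2, 3, 16)                   # (batch, seq, hidden)
weights, top_weights, top_experts = router(hidden)
print(weights.shape)       # (6, 4)  softmax over all experts, tokens flattened
print(top_weights.shape)   # (6, 2)  top-k weights, renormalized by their p=1 norm
print(top_experts.shape)   # (6, 2)  indices of the selected experts
```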
id: 1,571 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/dbrx/modeling_dbrx.py | class_name: transformers.models.dbrx.modeling_dbrx.DbrxSdpaAttention
human_written_code:
import torch
from typing import Any, Optional, Union
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, StaticCache
class DbrxSdpaAttention(DbrxAttention):
"""
Dbrx attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DbrxAttention`, as the weights of the module stay untouched. The only change is in the forward pass, adapting it to the
SDPA API.
"""
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if output_attentions:
logger.warning_once('DbrxModel is using DbrxSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
bsz, q_len, _ = hidden_states.size()
qkv_states = self.Wqkv(hidden_states)
if self.clip_qkv is not None:
qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query_states, key_states, value_states = qkv_states.split([self.hidden_size, self.num_key_value_heads * self.head_dim, self.num_key_value_heads * self.head_dim], dim=2)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, None)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.block_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, :key_states.shape[-2]]
if query_states.device.type == 'cuda' and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attn_pdrop if self.training else 0.0, is_causal=is_causal)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.out_proj(attn_output)
return (attn_output, None)
class_skeleton:
class DbrxSdpaAttention(DbrxAttention):
'''
Dbrx attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DbrxAttention`, as the weights of the module stay untouched. The only change is in the forward pass, adapting it to the
SDPA API.
'''
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 86 | AvgCountLineBlank: 14 | AvgCountLineCode: 66 | AvgCountLineComment: 6 | AvgCyclomatic: 8 | CommentToCodeRatio: 0.16 | CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 3 | CountDeclMethod: 1 | CountDeclMethodAll: 13 | CountLine: 93 | CountLineBlank: 15 | CountLineCode: 67 | CountLineCodeDecl: 22 | CountLineCodeExe: 56 | CountLineComment: 11 | CountStmt: 33 | CountStmtDecl: 10 | CountStmtExe: 31 | MaxCyclomatic: 8 | MaxInheritanceTree: 2 | MaxNesting: 1 | SumCyclomatic: 8
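The module above ultimately funnels into `torch.nn.functional.scaled_dot_product_attention`; a minimal call with dummy tensors (the shapes are assumptions) looks like this:

```python
import torch
import torch.nn.functional as F

q = torch.randn(1, 2, 5, 8)    # (batch, num_heads, seq, head_dim)
k = torch.randn(1, 2, 5, 8)
v = torch.randn(1, 2, 5, 8)
# is_causal=True stands in for an explicit mask, matching the
# `causal_mask is None and q_len > 1` branch in the forward pass above.
out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True)
print(out.shape)               # torch.Size([1, 2, 5, 8])
```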
id: 1,572 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/configuration_deberta.py | class_name: transformers.models.deberta.configuration_deberta.DebertaConfig
human_written_code:
from ...configuration_utils import PretrainedConfig
class DebertaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is
used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
[microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 0):
The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-07):
The epsilon used by the layer normalization layers.
relative_attention (`bool`, *optional*, defaults to `False`):
Whether to use relative position encoding.
max_relative_positions (`int`, *optional*, defaults to -1):
The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
as `max_position_embeddings`.
pad_token_id (`int`, *optional*, defaults to 0):
The value used to pad input_ids.
position_biased_input (`bool`, *optional*, defaults to `True`):
Whether to add absolute position embeddings to the content embeddings.
pos_att_type (`list[str]`, *optional*):
The type of relative position attention. It can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]` or
`["p2c", "c2p"]`.
legacy (`bool`, *optional*, defaults to `True`):
Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly
for mask infilling tasks.
Example:
```python
>>> from transformers import DebertaConfig, DebertaModel
>>> # Initializing a DeBERTa microsoft/deberta-base style configuration
>>> configuration = DebertaConfig()
>>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
>>> model = DebertaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'deberta'
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-07, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act='gelu', legacy=True, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.relative_attention = relative_attention
self.max_relative_positions = max_relative_positions
self.pad_token_id = pad_token_id
self.position_biased_input = position_biased_input
if isinstance(pos_att_type, str):
pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
self.pos_att_type = pos_att_type
self.vocab_size = vocab_size
self.layer_norm_eps = layer_norm_eps
self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
self.pooler_dropout = pooler_dropout
self.pooler_hidden_act = pooler_hidden_act
self.legacy = legacy
class_skeleton:
class DebertaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is
used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
[microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 0):
The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-07):
The epsilon used by the layer normalization layers.
relative_attention (`bool`, *optional*, defaults to `False`):
Whether to use relative position encoding.
max_relative_positions (`int`, *optional*, defaults to -1):
The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
as `max_position_embeddings`.
pad_token_id (`int`, *optional*, defaults to 0):
The value used to pad input_ids.
position_biased_input (`bool`, *optional*, defaults to `True`):
Whether to add absolute position embeddings to the content embeddings.
pos_att_type (`list[str]`, *optional*):
The type of relative position attention. It can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]` or
`["p2c", "c2p"]`.
legacy (`bool`, *optional*, defaults to `True`):
Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly
for mask infilling tasks.
Example:
```python
>>> from transformers import DebertaConfig, DebertaModel
>>> # Initializing a DeBERTa microsoft/deberta-base style configuration
>>> configuration = DebertaConfig()
>>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
>>> model = DebertaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-07, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act='gelu', legacy=True, **kwargs):
pass
total_program_units: 2 | total_doc_str: 1 | AvgCountLine: 53 | AvgCountLineBlank: 4 | AvgCountLineCode: 48 | AvgCountLineComment: 1 | AvgCyclomatic: 2 | CommentToCodeRatio: 1.28 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 21 | CountDeclMethod: 1 | CountDeclMethodAll: 1 | CountLine: 127 | CountLineBlank: 13 | CountLineCode: 50 | CountLineCodeDecl: 47 | CountLineCodeExe: 25 | CountLineComment: 64 | CountStmt: 27 | CountStmtDecl: 24 | CountStmtExe: 25 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 2
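One non-obvious detail in the `__init__` above is that `pos_att_type` may be passed as a `'|'`-separated string, which is lowercased and split. A short sketch, assuming a transformers install that exposes `DebertaConfig`:

```python
from transformers import DebertaConfig

config = DebertaConfig(relative_attention=True, pos_att_type="c2p|p2c")
print(config.pos_att_type)   # ['c2p', 'p2c'] -- the string form is split on '|'
```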
id: 1,573 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/configuration_deberta.py | class_name: transformers.models.deberta.configuration_deberta.DebertaOnnxConfig
human_written_code:
from ...onnx import OnnxConfig
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any, Union
from collections import OrderedDict
class DebertaOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
@property
def default_onnx_opset(self) -> int:
return 12
def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[str, Any]:
dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor)
if self._config.type_vocab_size == 0 and 'token_type_ids' in dummy_inputs:
del dummy_inputs['token_type_ids']
return dummy_inputs
class_skeleton:
class DebertaOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def default_onnx_opset(self) -> int:
pass
def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[str, Any]:
pass
total_program_units: 6 | total_doc_str: 0 | AvgCountLine: 10 | AvgCountLineBlank: 0 | AvgCountLineCode: 10 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 0 | CountDeclMethod: 3 | CountDeclMethodAll: 3 | CountLine: 35 | CountLineBlank: 2 | CountLineCode: 33 | CountLineCodeDecl: 20 | CountLineCodeExe: 15 | CountLineComment: 0 | CountStmt: 15 | CountStmtDecl: 6 | CountStmtExe: 11 | MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 6
id: 1,574 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | class_name: transformers.models.deberta.modeling_deberta.ContextPooler
human_written_code:
from torch import nn
from ...activations import ACT2FN
class ContextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
self.dropout = nn.Dropout(config.pooler_dropout)
self.config = config
def forward(self, hidden_states):
context_token = hidden_states[:, 0]
context_token = self.dropout(context_token)
pooled_output = self.dense(context_token)
pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
return pooled_output
@property
def output_dim(self):
return self.config.hidden_size
class_skeleton:
class ContextPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
@property
def output_dim(self):
pass
total_program_units: 5 | total_doc_str: 0 | AvgCountLine: 5 | AvgCountLineBlank: 0 | AvgCountLineCode: 4 | AvgCountLineComment: 1 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.13 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 3 | CountDeclMethod: 3 | CountDeclMethodAll: 13 | CountLine: 20 | CountLineBlank: 3 | CountLineCode: 15 | CountLineCodeDecl: 10 | CountLineCodeExe: 10 | CountLineComment: 2 | CountStmt: 14 | CountStmtDecl: 9 | CountStmtExe: 10 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 3
id: 1,575 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | class_name: transformers.models.deberta.modeling_deberta.DebertaAttention
human_written_code:
from torch import nn
from typing import Optional, Union
import torch
class DebertaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = DisentangledSelfAttention(config)
self.output = DebertaSelfOutput(config)
self.config = config
def forward(self, hidden_states, attention_mask, output_attentions: bool=False, query_states=None, relative_pos=None, rel_embeddings=None) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
self_output, att_matrix = self.self(hidden_states, attention_mask, output_attentions, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)
if query_states is None:
query_states = hidden_states
attention_output = self.output(self_output, query_states)
if output_attentions:
return (attention_output, att_matrix)
else:
return (attention_output, None)
class_skeleton:
class DebertaAttention(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask, output_attentions: bool=False, query_states=None, relative_pos=None, rel_embeddings=None) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
pass
total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 15 | AvgCountLineBlank: 1 | AvgCountLineCode: 15 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 5 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 32 | CountLineBlank: 2 | CountLineCode: 30 | CountLineCodeDecl: 16 | CountLineCodeExe: 19 | CountLineComment: 0 | CountStmt: 14 | CountStmtDecl: 8 | CountStmtExe: 11 | MaxCyclomatic: 3 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
id: 1,576 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | class_name: transformers.models.deberta.modeling_deberta.DebertaEmbeddings
human_written_code:
from torch import nn
import torch
class DebertaEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
pad_token_id = getattr(config, 'pad_token_id', 0)
self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
self.position_biased_input = getattr(config, 'position_biased_input', True)
if not self.position_biased_input:
self.position_embeddings = None
else:
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
if config.type_vocab_size > 0:
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
else:
self.token_type_embeddings = None
if self.embedding_size != config.hidden_size:
self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
else:
self.embed_proj = None
self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.position_embeddings is not None:
position_embeddings = self.position_embeddings(position_ids.long())
else:
position_embeddings = torch.zeros_like(inputs_embeds)
embeddings = inputs_embeds
if self.position_biased_input:
embeddings = embeddings + position_embeddings
if self.token_type_embeddings is not None:
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = embeddings + token_type_embeddings
if self.embed_proj is not None:
embeddings = self.embed_proj(embeddings)
embeddings = self.LayerNorm(embeddings)
if mask is not None:
if mask.dim() != embeddings.dim():
if mask.dim() == 4:
mask = mask.squeeze(1).squeeze(1)
mask = mask.unsqueeze(2)
mask = mask.to(embeddings.dtype)
embeddings = embeddings * mask
embeddings = self.dropout(embeddings)
return embeddings
class_skeleton:
class DebertaEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
pass
total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 38 | AvgCountLineBlank: 8 | AvgCountLineCode: 29 | AvgCountLineComment: 1 | AvgCyclomatic: 8 | CommentToCodeRatio: 0.03 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 9 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 79 | CountLineBlank: 18 | CountLineCode: 59 | CountLineCodeDecl: 18 | CountLineCodeExe: 56 | CountLineComment: 2 | CountStmt: 52 | CountStmtDecl: 18 | CountStmtExe: 49 | MaxCyclomatic: 12 | MaxInheritanceTree: 1 | MaxNesting: 3 | SumCyclomatic: 16
id: 1,577 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | class_name: transformers.models.deberta.modeling_deberta.DebertaEncoder
human_written_code:
from torch import nn
from typing import Optional, Union
import torch
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
class DebertaEncoder(nn.Module):
"""Modified BertEncoder with relative position bias support"""
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
self.relative_attention = getattr(config, 'relative_attention', False)
if self.relative_attention:
self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
self.gradient_checkpointing = False
def get_rel_embedding(self):
rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
return rel_embeddings
def get_attention_mask(self, attention_mask):
if attention_mask.dim() <= 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
elif attention_mask.dim() == 3:
attention_mask = attention_mask.unsqueeze(1)
return attention_mask
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
if self.relative_attention and relative_pos is None:
if query_states is not None:
relative_pos = build_relative_position(query_states, hidden_states)
else:
relative_pos = build_relative_position(hidden_states, hidden_states)
return relative_pos
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_hidden_states: bool=True, output_attentions: bool=False, query_states=None, relative_pos=None, return_dict: bool=True):
attention_mask = self.get_attention_mask(attention_mask)
relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
all_hidden_states: Optional[tuple[torch.Tensor]] = (hidden_states,) if output_hidden_states else None
all_attentions = () if output_attentions else None
next_kv = hidden_states
rel_embeddings = self.get_rel_embedding()
for i, layer_module in enumerate(self.layer):
hidden_states, att_m = layer_module(next_kv, attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if query_states is not None:
query_states = hidden_states
else:
next_kv = hidden_states
if output_attentions:
all_attentions = all_attentions + (att_m,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)
class_skeleton:
class DebertaEncoder(nn.Module):
'''Modified BertEncoder with relative position bias support'''
def __init__(self, config):
pass
def get_rel_embedding(self):
pass
def get_attention_mask(self, attention_mask):
pass
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_hidden_states: bool=True, output_attentions: bool=False, query_states=None, relative_pos=None, return_dict: bool=True):
pass
total_program_units: 6 | total_doc_str: 1 | AvgCountLine: 17 | AvgCountLineBlank: 2 | AvgCountLineCode: 15 | AvgCountLineComment: 0 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.01 | CountClassBase: 1 | CountClassCoupled: 8 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 5 | CountDeclInstanceVariable: 5 | CountDeclMethod: 5 | CountDeclMethodAll: 5 | CountLine: 91 | CountLineBlank: 13 | CountLineCode: 77 | CountLineCodeDecl: 28 | CountLineCodeExe: 62 | CountLineComment: 1 | CountStmt: 47 | CountStmtDecl: 19 | CountStmtExe: 41 | MaxCyclomatic: 9 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 20
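The 2D -> 4D expansion in `get_attention_mask` above is an outer product of the padding mask with itself. A standalone re-derivation on a toy mask (the tensor values are illustrative):

```python
import torch

mask = torch.tensor([[1, 1, 0]])              # (batch, seq); the last token is padding
ext = mask.unsqueeze(1).unsqueeze(2)          # (batch, 1, 1, seq)
mask_4d = ext * ext.squeeze(-2).unsqueeze(-1) # (batch, 1, seq, seq)
print(mask_4d[0, 0])
# tensor([[1, 1, 0],
#         [1, 1, 0],
#         [0, 0, 0]])  -- a pair (i, j) may attend only if both tokens are real
```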
id: 1,578 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | class_name: transformers.models.deberta.modeling_deberta.DebertaForMaskedLM
human_written_code:
from ...utils import auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
@auto_docstring
class DebertaForMaskedLM(DebertaPreTrainedModel):
_tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias']
def __init__(self, config):
super().__init__(config)
self.legacy = config.legacy
self.deberta = DebertaModel(config)
if self.legacy:
self.cls = LegacyDebertaOnlyMLMHead(config)
else:
self._tied_weights_keys = ['lm_predictions.lm_head.weight', 'deberta.embeddings.word_embeddings.weight']
self.lm_predictions = DebertaOnlyMLMHead(config)
self.post_init()
def get_output_embeddings(self):
if self.legacy:
return self.cls.predictions.decoder
else:
return self.lm_predictions.lm_head.dense
def set_output_embeddings(self, new_embeddings):
if self.legacy:
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
else:
self.lm_predictions.lm_head.dense = new_embeddings
self.lm_predictions.lm_head.bias = new_embeddings.bias
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
if self.legacy:
prediction_scores = self.cls(sequence_output)
else:
prediction_scores = self.lm_predictions(sequence_output, self.deberta.embeddings.word_embeddings)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring
class DebertaForMaskedLM(DebertaPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
'''
pass
total_program_units: 7 | total_doc_str: 1 | AvgCountLine: 19 | AvgCountLineBlank: 2 | AvgCountLineCode: 16 | AvgCountLineComment: 2 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.11 | CountClassBase: 1 | CountClassCoupled: 7 | CountClassCoupledModified: 4 | CountClassDerived: 0 | CountDeclInstanceMethod: 4 | CountDeclInstanceVariable: 4 | CountDeclMethod: 4 | CountDeclMethodAll: 5 | CountLine: 92 | CountLineBlank: 11 | CountLineCode: 74 | CountLineCodeDecl: 28 | CountLineCodeExe: 49 | CountLineComment: 8 | CountStmt: 36 | CountStmtDecl: 16 | CountStmtExe: 31 | MaxCyclomatic: 6 | MaxInheritanceTree: 2 | MaxNesting: 1 | SumCyclomatic: 12
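A hedged end-to-end sketch of the `labels` convention documented above (it assumes network access to the `microsoft/deberta-base` checkpoint; the sentence is arbitrary):

```python
import torch
from transformers import AutoTokenizer, DebertaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaForMaskedLM.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="pt")
labels = inputs.input_ids.clone()
labels[inputs.input_ids != tokenizer.mask_token_id] = -100  # tokens set to -100 are ignored by the loss
outputs = model(**inputs, labels=labels)
print(outputs.loss, outputs.logits.shape)
```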
id: 1,579 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | class_name: transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering
human_written_code:
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...utils import auto_docstring, logging
@auto_docstring
class DebertaForQuestionAnswering(DebertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring
class DebertaForQuestionAnswering(DebertaPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
pass
total_program_units: 5 | total_doc_str: 0 | AvgCountLine: 40 | AvgCountLineBlank: 5 | AvgCountLineCode: 29 | AvgCountLineComment: 7 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.19 | CountClassBase: 1 | CountClassCoupled: 5 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 3 | CountLine: 92 | CountLineBlank: 10 | CountLineCode: 69 | CountLineCodeDecl: 29 | CountLineCodeExe: 44 | CountLineComment: 13 | CountStmt: 32 | CountStmtDecl: 16 | CountStmtExe: 29 | MaxCyclomatic: 7 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 8
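A usage sketch for the span-extraction head above. Note the assumption: the base checkpoint's QA head is randomly initialized, so the decoded span is meaningless until the model is fine-tuned; the snippet only demonstrates the start/end-logits plumbing.

```python
import torch
from transformers import AutoTokenizer, DebertaForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaForQuestionAnswering.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("Who wrote it?", "It was written by Ada.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
start = int(outputs.start_logits.argmax(-1))
end = int(outputs.end_logits.argmax(-1))
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```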
id: 1,580 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | class_name: transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification
human_written_code:
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...utils import auto_docstring, logging
@auto_docstring(custom_intro='\n DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class DebertaForSequenceClassification(DebertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
num_labels = getattr(config, 'num_labels', 2)
self.num_labels = num_labels
self.deberta = DebertaModel(config)
self.pooler = ContextPooler(config)
output_dim = self.pooler.output_dim
self.classifier = nn.Linear(output_dim, num_labels)
drop_out = getattr(config, 'cls_dropout', None)
drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
self.dropout = nn.Dropout(drop_out)
self.post_init()
def get_input_embeddings(self):
return self.deberta.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.deberta.set_input_embeddings(new_embeddings)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
encoder_layer = outputs[0]
pooled_output = self.pooler(encoder_layer)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
loss_fn = nn.MSELoss()
logits = logits.view(-1).to(labels.dtype)
loss = loss_fn(logits, labels.view(-1))
elif labels.dim() == 1 or labels.size(-1) == 1:
label_index = (labels >= 0).nonzero()
labels = labels.long()
if label_index.size(0) > 0:
labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))
labels = torch.gather(labels, 0, label_index.view(-1))
loss_fct = CrossEntropyLoss()
loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
else:
loss = torch.tensor(0).to(logits)
else:
log_softmax = nn.LogSoftmax(-1)
loss = -(log_softmax(logits) * labels).sum(-1).mean()
elif self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring(custom_intro='\n DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class DebertaForSequenceClassification(DebertaPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
total_program_units: 7 | total_doc_str: 1 | AvgCountLine: 25 | AvgCountLineBlank: 2 | AvgCountLineCode: 21 | AvgCountLineComment: 2 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.09 | CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 3 | CountClassDerived: 0 | CountDeclInstanceMethod: 4 | CountDeclInstanceVariable: 5 | CountDeclMethod: 4 | CountDeclMethodAll: 5 | CountLine: 109 | CountLineBlank: 11 | CountLineCode: 90 | CountLineCodeDecl: 36 | CountLineCodeExe: 68 | CountLineComment: 8 | CountStmt: 53 | CountStmtDecl: 24 | CountStmtExe: 48 | MaxCyclomatic: 13 | MaxInheritanceTree: 2 | MaxNesting: 4 | SumCyclomatic: 17
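The loss selection above branches on `config.problem_type`. A tiny, randomly initialized model is enough to see a branch fire; the small config values below are hypothetical, chosen only to keep the model light.

```python
import torch
from transformers import DebertaConfig, DebertaForSequenceClassification

config = DebertaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=1,
                       num_attention_heads=2, intermediate_size=64,
                       num_labels=3, problem_type="single_label_classification")
model = DebertaForSequenceClassification(config)

input_ids = torch.randint(0, 100, (2, 6))
labels = torch.tensor([0, 2])    # class indices -> the CrossEntropyLoss branch
print(model(input_ids=input_ids, labels=labels).loss)
```

Setting `problem_type="regression"` with float labels, or `"multi_label_classification"` with multi-hot labels, would exercise the MSELoss and BCEWithLogitsLoss branches instead.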
id: 1,581 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | class_name: transformers.models.deberta.modeling_deberta.DebertaForTokenClassification
human_written_code:
from ...utils import auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
@auto_docstring
class DebertaForTokenClassification(DebertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring
class DebertaForTokenClassification(DebertaPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 5 | 1 | 28 | 4 | 22 | 3 | 3 | 0.1 | 1 | 5 | 2 | 0 | 2 | 4 | 2 | 3 | 64 | 9 | 50 | 25 | 30 | 5 | 22 | 13 | 19 | 5 | 2 | 1 | 6
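A minimal usage sketch for the `DebertaForTokenClassification` record above; the checkpoint name and `num_labels` below are illustrative assumptions, not values taken from this record.

```python
# Hedged usage sketch: exercise the forward() path above, including the
# CrossEntropyLoss branch. Checkpoint name and num_labels are assumptions.
import torch
from transformers import DebertaForTokenClassification, DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaForTokenClassification.from_pretrained("microsoft/deberta-base", num_labels=3)

inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt")
# One label id in [0, num_labels) per token; all zeros here only to exercise the loss path.
labels = torch.zeros_like(inputs["input_ids"])

outputs = model(**inputs, labels=labels)
print(outputs.loss, outputs.logits.shape)  # scalar loss, (batch, seq_len, num_labels)
```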
| 1,582 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaIntermediate |
from torch import nn
from ...activations import ACT2FN
import torch
class DebertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class DebertaIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3
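The string-or-callable activation dispatch in `DebertaIntermediate.__init__` above can be sketched without transformers' full `ACT2FN` table; the two-entry dict below is a hypothetical stand-in used only for illustration.

```python
# Local stand-in for the ACT2FN lookup used above (illustrative, not the real table).
import torch
import torch.nn.functional as F

ACT2FN_DEMO = {"gelu": F.gelu, "relu": F.relu}

hidden_act = "gelu"  # config.hidden_act may be a string or a callable
intermediate_act_fn = ACT2FN_DEMO[hidden_act] if isinstance(hidden_act, str) else hidden_act
print(intermediate_act_fn(torch.tensor([-1.0, 0.0, 1.0])))
```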
| 1,583 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaLMPredictionHead |
from torch import nn
import torch
from ...activations import ACT2FN
class DebertaLMPredictionHead(nn.Module):
"""https://github.com/microsoft/DeBERTa/blob/master/DeBERTa/deberta/bert.py#L270"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=True)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states, word_embeddings):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = torch.matmul(hidden_states, word_embeddings.weight.t()) + self.bias
return hidden_states
|
class DebertaLMPredictionHead(nn.Module):
'''https://github.com/microsoft/DeBERTa/blob/master/DeBERTa/deberta/bert.py#L270'''
def __init__(self, config):
pass
def forward(self, hidden_states, word_embeddings):
pass
| 3 | 1 | 10 | 2 | 9 | 1 | 2 | 0.17 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 25 | 5 | 18 | 7 | 15 | 3 | 15 | 7 | 12 | 2 | 1 | 1 | 3
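A standalone sketch, with made-up toy sizes, of the tied projection in `DebertaLMPredictionHead.forward` above: vocabulary logits come from a matmul against the word-embedding matrix plus a learned bias, rather than a separate decoder weight.

```python
# Toy illustration of the tied projection: logits = hidden @ E^T + bias.
import torch

hidden_size, vocab_size, seq_len = 8, 32, 5
word_embeddings = torch.nn.Embedding(vocab_size, hidden_size)
bias = torch.zeros(vocab_size)

hidden_states = torch.randn(1, seq_len, hidden_size)  # stand-in for the transformed states
logits = torch.matmul(hidden_states, word_embeddings.weight.t()) + bias
print(logits.shape)  # torch.Size([1, 5, 32])
```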
| 1,584 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaLayer |
from ...modeling_layers import GradientCheckpointingLayer
from typing import Optional, Union
import torch
class DebertaLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = DebertaAttention(config)
self.intermediate = DebertaIntermediate(config)
self.output = DebertaOutput(config)
def forward(self, hidden_states, attention_mask, query_states=None, relative_pos=None, rel_embeddings=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
attention_output, att_matrix = self.attention(hidden_states, attention_mask, output_attentions=output_attentions, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if output_attentions:
return (layer_output, att_matrix)
else:
return (layer_output, None)
|
class DebertaLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask, query_states=None, relative_pos=None, rel_embeddings=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
pass
| 3 | 0 | 15 | 1 | 14 | 0 | 2 | 0 | 1 | 6 | 3 | 0 | 2 | 3 | 2 | 12 | 31 | 2 | 29 | 17 | 18 | 0 | 13 | 9 | 10 | 2 | 1 | 1 | 3
| 1,585 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaLayerNorm |
from torch import nn
import torch
class DebertaLayerNorm(nn.Module):
"""LayerNorm module (epsilon inside the square root)."""
def __init__(self, size, eps=1e-12):
super().__init__()
self.weight = nn.Parameter(torch.ones(size))
self.bias = nn.Parameter(torch.zeros(size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_type = hidden_states.dtype
hidden_states = hidden_states.float()
mean = hidden_states.mean(-1, keepdim=True)
variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)
hidden_states = hidden_states.to(input_type)
y = self.weight * hidden_states + self.bias
return y
|
class DebertaLayerNorm(nn.Module):
'''LayerNorm module (epsilon inside the square root).'''
def __init__(self, size, eps=1e-12):
pass
def forward(self, hidden_states):
pass
| 3 | 1 | 7 | 0 | 7 | 0 | 1 | 0.07 | 1 | 1 | 0 | 0 | 2 | 3 | 2 | 12 | 18 | 2 | 15 | 10 | 12 | 1 | 15 | 10 | 12 | 1 | 1 | 0 | 2
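A quick numeric check of the `DebertaLayerNorm` record above, assuming default affine parameters (`weight=1`, `bias=0`): the manual float32 normalization matches `torch.nn.LayerNorm`, since both keep epsilon inside the square root. Sizes here are arbitrary.

```python
# Compare the manual normalization above against torch.nn.LayerNorm without affine params.
import torch

size = 16
x = torch.randn(2, 4, size)

mean = x.float().mean(-1, keepdim=True)
variance = (x.float() - mean).pow(2).mean(-1, keepdim=True)
manual = (x.float() - mean) / torch.sqrt(variance + 1e-12)

reference = torch.nn.LayerNorm(size, eps=1e-12, elementwise_affine=False)(x)
print(torch.allclose(manual, reference, atol=1e-5))  # True
```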
| 1,586 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaModel |
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, logging
import torch
from typing import Optional, Union
@auto_docstring
class DebertaModel(DebertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = DebertaEmbeddings(config)
self.encoder = DebertaEncoder(config)
self.z_steps = 0
self.config = config
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError('The prune function is not implemented in DeBERTa model.')
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids, mask=attention_mask, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, attention_mask, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict)
encoded_layers = encoder_outputs[1]
if self.z_steps > 1:
hidden_states = encoded_layers[-2]
layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
query_states = encoded_layers[-1]
rel_embeddings = self.encoder.get_rel_embedding()
attention_mask = self.encoder.get_attention_mask(attention_mask)
rel_pos = self.encoder.get_rel_pos(embedding_output)
for layer in layers[1:]:
query_states = layer(hidden_states, attention_mask, output_attentions=False, query_states=query_states, relative_pos=rel_pos, rel_embeddings=rel_embeddings)
encoded_layers.append(query_states)
sequence_output = encoded_layers[-1]
if not return_dict:
return (sequence_output,) + encoder_outputs[1 if output_hidden_states else 2:]
return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, attentions=encoder_outputs.attentions)
|
@auto_docstring
class DebertaModel(DebertaPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
pass
| 8 | 1 | 20 | 2 | 17 | 1 | 4 | 0.06 | 1 | 9 | 3 | 0 | 5 | 4 | 5 | 6 | 109 | 14 | 90 | 33 | 68 | 5 | 46 | 22 | 40 | 15 | 2 | 2 | 19
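A minimal forward pass through the base `DebertaModel` above; the checkpoint name is an assumption borrowed from the tokenizer docstrings elsewhere in this file.

```python
# Hedged sketch: encode a sentence and inspect the hidden states.
import torch
from transformers import DebertaModel, DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("Hello world", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)
```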
| 1,587 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaOnlyMLMHead |
from torch import nn
class DebertaOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.lm_head = DebertaLMPredictionHead(config)
def forward(self, sequence_output, word_embeddings):
prediction_scores = self.lm_head(sequence_output, word_embeddings)
return prediction_scores
|
class DebertaOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output, word_embeddings):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0.14 | 1 | 2 | 1 | 0 | 2 | 1 | 2 | 12 | 9 | 1 | 7 | 5 | 4 | 1 | 7 | 5 | 4 | 1 | 1 | 0 | 2
| 1,588 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaOutput |
from torch import nn
class DebertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class DebertaOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, input_tensor):
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 2 | 4 | 2 | 12 | 13 | 1 | 12 | 7 | 9 | 0 | 12 | 7 | 9 | 1 | 1 | 0 | 2
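A toy sketch, with made-up sizes, of the project, dropout, residual-add, then LayerNorm pattern that `DebertaOutput` above shares with `DebertaSelfOutput`; dropout is omitted here since it is the identity in eval mode.

```python
# Project back to hidden_size, add the residual, then normalize.
import torch

dense = torch.nn.Linear(16, 8)        # intermediate_size=16 -> hidden_size=8 (toy values)
norm = torch.nn.LayerNorm(8, eps=1e-7)

intermediate = torch.randn(2, 5, 16)  # output of the intermediate block
input_tensor = torch.randn(2, 5, 8)   # residual carried around the block
hidden_states = norm(dense(intermediate) + input_tensor)
print(hidden_states.shape)  # torch.Size([2, 5, 8])
```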
| 1,589 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel |
from torch import nn
from ...utils import auto_docstring, logging
from .configuration_deberta import DebertaConfig
from ...modeling_utils import PreTrainedModel
@auto_docstring
class DebertaPreTrainedModel(PreTrainedModel):
config: DebertaConfig
base_model_prefix = 'deberta'
_keys_to_ignore_on_load_unexpected = ['position_embeddings']
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, (nn.LayerNorm, DebertaLayerNorm)):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, DisentangledSelfAttention):
module.q_bias.data.zero_()
module.v_bias.data.zero_()
elif isinstance(module, (LegacyDebertaLMPredictionHead, DebertaLMPredictionHead)):
module.bias.data.zero_()
|
@auto_docstring
class DebertaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights.'''
pass
| 3 | 1 | 12 | 0 | 9 | 3 | 5 | 0.5 | 1 | 0 | 0 | 5 | 1 | 0 | 1 | 1 | 23 | 2 | 14 | 6 | 12 | 7 | 13 | 6 | 11 | 5 | 1 | 2 | 5
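A sketch of when `_init_weights` above actually runs: building a model from a config (rather than via `from_pretrained`) triggers `post_init()`, which applies it to every submodule. The tiny config values below are assumptions chosen only to keep the model small.

```python
# Random-init path: DebertaModel(config) calls post_init() -> _init_weights(...).
from transformers import DebertaConfig, DebertaModel

config = DebertaConfig(vocab_size=100, hidden_size=64, num_hidden_layers=2,
                       num_attention_heads=2, intermediate_size=128)
model = DebertaModel(config)
# Linear/Embedding weights were drawn from N(0, initializer_range^2).
print(float(model.embeddings.word_embeddings.weight.std()))  # roughly 0.02
```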
| 1,590 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DebertaSelfOutput |
from torch import nn
class DebertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class DebertaSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, input_tensor):
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2
| 1,591 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.DisentangledSelfAttention |
from torch import nn
from typing import Optional, Union
import torch
class DisentangledSelfAttention(nn.Module):
"""
Disentangled self-attention module
Parameters:
config (`str`):
A model config class instance with the configuration to build a new model. The schema is similar to
*BertConfig*, for more details, please refer to [`DebertaConfig`]
"""
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
self.q_bias = nn.Parameter(torch.zeros(self.all_head_size, dtype=torch.float))
self.v_bias = nn.Parameter(torch.zeros(self.all_head_size, dtype=torch.float))
self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
self.relative_attention = getattr(config, 'relative_attention', False)
self.talking_head = getattr(config, 'talking_head', False)
if self.talking_head:
self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
else:
self.head_logits_proj = None
self.head_weights_proj = None
if self.relative_attention:
self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.pos_dropout = nn.Dropout(config.hidden_dropout_prob)
if 'c2p' in self.pos_att_type:
self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
if 'p2c' in self.pos_att_type:
self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False, query_states: Optional[torch.Tensor]=None, relative_pos: Optional[torch.Tensor]=None, rel_embeddings: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Call the module
Args:
hidden_states (`torch.FloatTensor`):
Input states to the module, usually the output of the previous layer; they serve as the Q, K and V in
*Attention(Q,K,V)*
attention_mask (`torch.BoolTensor`):
An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
th token.
output_attentions (`bool`, *optional*):
Whether to return the attention matrix.
query_states (`torch.FloatTensor`, *optional*):
The *Q* state in *Attention(Q,K,V)*.
relative_pos (`torch.LongTensor`):
The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
values ranging in [*-max_relative_positions*, *max_relative_positions*].
rel_embeddings (`torch.FloatTensor`):
The embedding of relative distances. It's a tensor of shape [\\(2 \\times
\\text{max_relative_positions}\\), *hidden_size*].
"""
if query_states is None:
qp = self.in_proj(hidden_states)
query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)
else:
ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)
qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]
q = torch.matmul(qkvw[0], query_states.t().to(dtype=qkvw[0].dtype))
k = torch.matmul(qkvw[1], hidden_states.t().to(dtype=qkvw[1].dtype))
v = torch.matmul(qkvw[2], hidden_states.t().to(dtype=qkvw[2].dtype))
query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]
query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
rel_att: int = 0
scale_factor = 1 + len(self.pos_att_type)
scale = scaled_size_sqrt(query_layer, scale_factor)
query_layer = query_layer / scale.to(dtype=query_layer.dtype)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.relative_attention and rel_embeddings is not None and (relative_pos is not None):
rel_embeddings = self.pos_dropout(rel_embeddings)
rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
if rel_att is not None:
attention_scores = attention_scores + rel_att
if self.head_logits_proj is not None:
attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
attention_mask = attention_mask.bool()
attention_scores = attention_scores.masked_fill(~attention_mask, torch.finfo(query_layer.dtype).min)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if self.head_weights_proj is not None:
attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (-1,)
context_layer = context_layer.view(new_context_layer_shape)
if not output_attentions:
return (context_layer, None)
return (context_layer, attention_probs)
def disentangled_att_bias(self, query_layer: torch.Tensor, key_layer: torch.Tensor, relative_pos: torch.Tensor, rel_embeddings: torch.Tensor, scale_factor: int):
if relative_pos is None:
relative_pos = build_relative_position(query_layer, key_layer, query_layer.device)
if relative_pos.dim() == 2:
relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
elif relative_pos.dim() == 3:
relative_pos = relative_pos.unsqueeze(1)
elif relative_pos.dim() != 4:
raise ValueError(f'Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}')
att_span = compute_attention_span(query_layer, key_layer, self.max_relative_positions)
relative_pos = relative_pos.long()
rel_embeddings = rel_embeddings[self.max_relative_positions - att_span:self.max_relative_positions + att_span, :].unsqueeze(0)
score = 0
if 'c2p' in self.pos_att_type:
pos_key_layer = self.pos_proj(rel_embeddings)
pos_key_layer = self.transpose_for_scores(pos_key_layer)
c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))
c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))
score += c2p_att
if 'p2c' in self.pos_att_type:
pos_query_layer = self.pos_q_proj(rel_embeddings)
pos_query_layer = self.transpose_for_scores(pos_query_layer)
pos_query_layer /= scaled_size_sqrt(pos_query_layer, scale_factor)
r_pos = build_rpos(query_layer, key_layer, relative_pos)
p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype))
p2c_att = torch.gather(p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)).transpose(-1, -2)
p2c_att = uneven_size_corrected(p2c_att, query_layer, key_layer, relative_pos)
score += p2c_att
return score
|
class DisentangledSelfAttention(nn.Module):
'''
Disentangled self-attention module
Parameters:
config (`str`):
A model config class instance with the configuration to build a new model. The schema is similar to
*BertConfig*, for more details, please refer to [`DebertaConfig`]
'''
def __init__(self, config):
pass
def transpose_for_scores(self, x):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False, query_states: Optional[torch.Tensor]=None, relative_pos: Optional[torch.Tensor]=None, rel_embeddings: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''
Call the module
Args:
hidden_states (`torch.FloatTensor`):
Input states to the module, usually the output of the previous layer; they serve as the Q, K and V in
*Attention(Q,K,V)*
attention_mask (`torch.BoolTensor`):
An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
th token.
output_attentions (`bool`, *optional*):
Whether to return the attention matrix.
query_states (`torch.FloatTensor`, *optional*):
The *Q* state in *Attention(Q,K,V)*.
relative_pos (`torch.LongTensor`):
The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
values ranging in [*-max_relative_positions*, *max_relative_positions*].
rel_embeddings (`torch.FloatTensor`):
The embedding of relative distances. It's a tensor of shape [\(2 \times
\text{max_relative_positions}\), *hidden_size*].
'''
pass
def disentangled_att_bias(self, query_layer: torch.Tensor, key_layer: torch.Tensor, relative_pos: torch.Tensor, rel_embeddings: torch.Tensor, scale_factor: int):
pass
| 5 | 2 | 46 | 7 | 32 | 7 | 6 | 0.27 | 1 | 7 | 0 | 0 | 4 | 16 | 4 | 14 | 196 | 33 | 129 | 59 | 109 | 35 | 99 | 44 | 94 | 8 | 1 | 2 | 23
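A toy illustration of the index arithmetic in `disentangled_att_bias` above: relative distances are shifted by `att_span` and clamped into `[0, 2 * att_span - 1]` so they can index the relative-embedding table via `gather`. The span value here is arbitrary.

```python
# Shift-and-clamp used for the content-to-position (c2p) gather indices.
import torch

att_span = 4
relative_pos = torch.arange(-6, 7)  # deliberately wider than the span
c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
print(relative_pos.tolist())  # [-6, ..., 6]
print(c2p_pos.tolist())       # saturates at 0 for distances <= -4 and at 7 for distances >= 3
```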
| 1,592 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.LegacyDebertaLMPredictionHead |
from torch import nn
import torch
class LegacyDebertaLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LegacyDebertaPredictionHeadTransform(config)
self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def _tie_weights(self):
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
|
class LegacyDebertaLMPredictionHead(nn.Module):
def __init__(self, config):
pass
def _tie_weights(self):
pass
def forward(self, hidden_states):
pass
| 4 | 0 | 6 | 1 | 4 | 1 | 1 | 0.21 | 1 | 2 | 1 | 0 | 3 | 4 | 3 | 13 | 22 | 5 | 14 | 8 | 10 | 3 | 14 | 8 | 10 | 1 | 1 | 0 | 3
| 1,593 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.LegacyDebertaOnlyMLMHead |
from torch import nn
import torch
class LegacyDebertaOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = LegacyDebertaLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
class LegacyDebertaOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 1 | 2 | 12 | 8 | 1 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 1 | 0 | 2
| 1,594 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/modeling_deberta.py | transformers.models.deberta.modeling_deberta.LegacyDebertaPredictionHeadTransform |
from torch import nn
from ...activations import ACT2FN
class LegacyDebertaPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
self.dense = nn.Linear(config.hidden_size, self.embedding_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
|
class LegacyDebertaPredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 8 | 1 | 7 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 17 | 2 | 15 | 7 | 12 | 0 | 14 | 7 | 11 | 2 | 1 | 1 | 3
| 1,595 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/tokenization_deberta.py | transformers.models.deberta.tokenization_deberta.DebertaTokenizer |
import os
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from typing import Optional
import regex as re
import json
class DebertaTokenizer(PreTrainedTokenizer):
"""
Construct a DeBERTa tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
```python
>>> from transformers import DebertaTokenizer
>>> tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
>>> tokenizer("Hello world")["input_ids"]
[1, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[1, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"[CLS]"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[SEP]"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just like
any other word. (The DeBERTa tokenizer detects the beginning of words by the preceding space.)
add_bos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial <|endoftext|> to the input. This allows treating the leading word just
like any other word.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
def __init__(self, vocab_file, merges_file, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, add_bos_token=False, **kwargs):
bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.add_bos_token = add_bos_token
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
bpe_merges = merges_handle.read().split('\n')[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, **kwargs)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A DeBERTa sequence has the following format:
- single sequence: [CLS] X [SEP]
- pair of sequences: [CLS] A [SEP] B [SEP]
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = ''.join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write(' '.join(bpe_tokens) + '\n')
index += 1
return (vocab_file, merge_file)
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())):
text = ' ' + text
return (text, kwargs)
|
class DebertaTokenizer(PreTrainedTokenizer):
'''
Construct a DeBERTa tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
```python
>>> from transformers import DebertaTokenizer
>>> tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
>>> tokenizer("Hello world")["input_ids"]
[1, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[1, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"[CLS]"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[SEP]"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just like
any other word. (The DeBERTa tokenizer detects the beginning of words by the preceding space.)
add_bos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial <|endoftext|> to the input. This allows treating the leading word just
like any other word.
'''
def __init__(self, vocab_file, merges_file, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, add_bos_token=False, **kwargs):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def bpe(self, token):
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A DeBERTa sequence has the following format:
- single sequence: [CLS] X [SEP]
- pair of sequences: [CLS] A [SEP] B [SEP]
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def _tokenize(self, text):
'''Tokenize a string.'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) in a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
pass
| 14 | 7 | 18 | 2 | 13 | 4 | 3 | 0.67 | 1 | 11 | 0 | 0 | 13 | 10 | 13 | 102 | 322 | 45 | 168 | 72 | 132 | 112 | 121 | 46 | 107 | 9 | 3 | 3 | 39
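The `bpe()` method above relies on a `get_pairs` helper defined elsewhere in the module; the function below is a local re-implementation written only for illustration.

```python
# Local re-implementation of the symbol-pair extraction that drives bpe() above.
def get_pairs_demo(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(sorted(get_pairs_demo(tuple("lower"))))
# [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')]
```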
| 1,596 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta/tokenization_deberta_fast.py | transformers.models.deberta.tokenization_deberta_fast.DebertaTokenizerFast |
from typing import Optional
from .tokenization_deberta import DebertaTokenizer
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
class DebertaTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" DeBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
```python
>>> from transformers import DebertaTokenizerFast
>>> tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base")
>>> tokenizer("Hello world")["input_ids"]
[1, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[1, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
The path to a tokenizer file to use instead of the vocab file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"[CLS]"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[SEP]"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just like
any other word. (The DeBERTa tokenizer detects the beginning of words by the preceding space.)
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
slow_tokenizer_class = DebertaTokenizer
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, **kwargs):
super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
self.add_bos_token = kwargs.pop('add_bos_token', False)
@property
def mask_token(self) -> str:
"""
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
Deberta tokenizer has a special mask token to be used in the fill-mask pipeline. The mask token will greedily
comprise the space before the *[MASK]*.
"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def mask_token(self, value):
"""
Overriding the default behavior of the mask token to have it eat the space before it.
"""
value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
self._mask_token = value
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A DeBERTa sequence has the following format:
- single sequence: [CLS] X [SEP]
- pair of sequences: [CLS] A [SEP] B [SEP]
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
|
class DebertaTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" DeBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
```python
>>> from transformers import DebertaTokenizerFast
>>> tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base")
>>> tokenizer("Hello world")["input_ids"]
[1, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[1, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
The path to a tokenizer file to use instead of the vocab file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"[CLS]"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[SEP]"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows treating the leading word just like
any other word. (The DeBERTa tokenizer detects the beginning of words by the preceding space.)
'''
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, **kwargs):
pass
@property
def mask_token(self) -> str:
'''
`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
having been set.
Deberta tokenizer has a special mask token to be used in the fill-mask pipeline. The mask token will greedily
comprise the space before the *[MASK]*.
'''
pass
@mask_token.setter
def mask_token(self, value):
'''
Overriding the default behavior of the mask token to have it eat the space before it.
'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A DeBERTa sequence has the following format:
- single sequence: [CLS] X [SEP]
- pair of sequences: [CLS] A [SEP] B [SEP]
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
pass
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 10 | 4 | 16 | 2 | 9 | 5 | 2 | 1.2 | 1 | 5 | 1 | 0 | 8 | 2 | 8 | 96 | 207 | 31 | 80 | 42 | 50 | 96 | 39 | 21 | 30 | 3 | 3 | 2 | 13
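A usage sketch for the asserts in `_batch_encode_plus` / `_encode_plus` above: pretokenized input is accepted only when the tokenizer is instantiated with `add_prefix_space=True`. The checkpoint name is an assumption.

```python
# Pretokenized input requires add_prefix_space=True, per the assert above.
from transformers import DebertaTokenizerFast

tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base", add_prefix_space=True)
enc = tokenizer(["Hello", "world"], is_split_into_words=True)
print(enc["input_ids"])  # [CLS]/[SEP]-wrapped ids for the two words
```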
| 1,597 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta_v2/configuration_deberta_v2.py | transformers.models.deberta_v2.configuration_deberta_v2.DebertaV2Config |
from ...configuration_utils import PretrainedConfig
class DebertaV2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`DebertaV2Model`]. It is used to instantiate a
DeBERTa-v2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the DeBERTa
[microsoft/deberta-v2-xlarge](https://huggingface.co/microsoft/deberta-v2-xlarge) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 128100):
Vocabulary size of the DeBERTa-v2 model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`DebertaV2Model`].
hidden_size (`int`, *optional*, defaults to 1536):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 24):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 0):
The vocabulary size of the `token_type_ids` passed when calling [`DebertaV2Model`] or [`TFDebertaV2Model`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-7):
The epsilon used by the layer normalization layers.
relative_attention (`bool`, *optional*, defaults to `False`):
Whether to use relative position encoding.
max_relative_positions (`int`, *optional*, defaults to -1):
The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
as `max_position_embeddings`.
pad_token_id (`int`, *optional*, defaults to 0):
The value used to pad input_ids.
position_biased_input (`bool`, *optional*, defaults to `True`):
Whether to add absolute position embeddings to the content embeddings.
pos_att_type (`list[str]`, *optional*):
The type of relative position attention; it can be a combination of `"p2c"` and `"c2p"`, e.g. `["p2c"]`,
`["c2p"]`, or `["p2c", "c2p"]`.
legacy (`bool`, *optional*, defaults to `True`):
Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly
for mask infilling tasks.
Example:
```python
>>> from transformers import DebertaV2Config, DebertaV2Model
>>> # Initializing a DeBERTa-v2 microsoft/deberta-v2-xlarge style configuration
>>> configuration = DebertaV2Config()
>>> # Initializing a model (with random weights) from the microsoft/deberta-v2-xlarge style configuration
>>> model = DebertaV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'deberta-v2'
def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-07, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act='gelu', legacy=True, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.relative_attention = relative_attention
self.max_relative_positions = max_relative_positions
self.pad_token_id = pad_token_id
self.position_biased_input = position_biased_input
if isinstance(pos_att_type, str):
pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
self.pos_att_type = pos_att_type
self.vocab_size = vocab_size
self.layer_norm_eps = layer_norm_eps
self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
self.pooler_dropout = pooler_dropout
self.pooler_hidden_act = pooler_hidden_act
self.legacy = legacy
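Note that the constructor accepts `pos_att_type` either as a list or as a pipe-separated string, which it lowercases and splits. A quick illustration (a minimal sketch, assuming `transformers` is installed):
```python
from transformers import DebertaV2Config

# A pipe-separated string is lowercased and split on '|'
config = DebertaV2Config(pos_att_type="P2C|C2P")
print(config.pos_att_type)  # ['p2c', 'c2p']

# A list is stored as-is
config = DebertaV2Config(pos_att_type=["p2c"])
print(config.pos_att_type)  # ['p2c']
```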
| class_skeleton:
class DebertaV2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`DebertaV2Model`]. It is used to instantiate a
DeBERTa-v2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the DeBERTa
[microsoft/deberta-v2-xlarge](https://huggingface.co/microsoft/deberta-v2-xlarge) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 128100):
Vocabulary size of the DeBERTa-v2 model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`DebertaV2Model`].
hidden_size (`int`, *optional*, defaults to 1536):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 24):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 0):
The vocabulary size of the `token_type_ids` passed when calling [`DebertaV2Model`] or [`TFDebertaV2Model`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-7):
The epsilon used by the layer normalization layers.
relative_attention (`bool`, *optional*, defaults to `False`):
Whether to use relative position encoding.
max_relative_positions (`int`, *optional*, defaults to -1):
The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
as `max_position_embeddings`.
pad_token_id (`int`, *optional*, defaults to 0):
The value used to pad input_ids.
position_biased_input (`bool`, *optional*, defaults to `True`):
Whether to add absolute position embeddings to the content embeddings.
pos_att_type (`list[str]`, *optional*):
The type of relative position attention; it can be a combination of `"p2c"` and `"c2p"`, e.g. `["p2c"]`,
`["c2p"]`, or `["p2c", "c2p"]`.
legacy (`bool`, *optional*, defaults to `True`):
Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly
for mask infilling tasks.
Example:
```python
>>> from transformers import DebertaV2Config, DebertaV2Model
>>> # Initializing a DeBERTa-v2 microsoft/deberta-v2-xlarge style configuration
>>> configuration = DebertaV2Config()
>>> # Initializing a model (with random weights) from the microsoft/deberta-v2-xlarge style configuration
>>> model = DebertaV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-07, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act='gelu', legacy=True, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 53
| AvgCountLineBlank: 4
| AvgCountLineCode: 48
| AvgCountLineComment: 1
| AvgCyclomatic: 2
| CommentToCodeRatio: 1.28
| CountClassBase: 1
| CountClassCoupled: 2
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 21
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 127
| CountLineBlank: 13
| CountLineCode: 50
| CountLineCodeDecl: 47
| CountLineCodeExe: 25
| CountLineComment: 64
| CountStmt: 27
| CountStmtDecl: 24
| CountStmtExe: 25
| MaxCyclomatic: 2
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 2
| id: 1,598
| repository_name: huggingface/pytorch-pretrained-BERT
| file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta_v2/configuration_deberta_v2.py
| class_name: transformers.models.deberta_v2.configuration_deberta_v2.DebertaV2OnnxConfig
| human_written_code:
from collections.abc import Mapping
from ...onnx import OnnxConfig
from typing import TYPE_CHECKING, Any, Union
from collections import OrderedDict
class DebertaV2OnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
@property
def default_onnx_opset(self) -> int:
return 12
def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[str, Any]:
dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor)
if self._config.type_vocab_size == 0 and 'token_type_ids' in dummy_inputs:
del dummy_inputs['token_type_ids']
return dummy_inputs
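The `inputs` property above chooses dynamic axes by task and omits `token_type_ids` whenever `type_vocab_size` is 0 (the DeBERTa-v2 default). A minimal sketch of inspecting the resulting input spec, assuming a `transformers` version that still ships the legacy `transformers.onnx` utilities:
```python
from transformers import DebertaV2Config
from transformers.models.deberta_v2.configuration_deberta_v2 import DebertaV2OnnxConfig

config = DebertaV2Config()  # type_vocab_size defaults to 0
onnx_config = DebertaV2OnnxConfig(config)

# token_type_ids is dropped because type_vocab_size == 0
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])
print(onnx_config.default_onnx_opset)  # 12
```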
| class_skeleton:
class DebertaV2OnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def default_onnx_opset(self) -> int:
pass
def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[str, Any]:
pass
| total_program_units: 6
| total_doc_str: 0
| AvgCountLine: 10
| AvgCountLineBlank: 0
| AvgCountLineCode: 10
| AvgCountLineComment: 0
| AvgCyclomatic: 2
| CommentToCodeRatio: 0
| CountClassBase: 1
| CountClassCoupled: 6
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 3
| CountDeclInstanceVariable: 0
| CountDeclMethod: 3
| CountDeclMethodAll: 3
| CountLine: 35
| CountLineBlank: 2
| CountLineCode: 33
| CountLineCodeDecl: 20
| CountLineCodeExe: 15
| CountLineComment: 0
| CountStmt: 15
| CountStmtDecl: 6
| CountStmtExe: 11
| MaxCyclomatic: 3
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 6
| id: 1,599
| repository_name: huggingface/pytorch-pretrained-BERT
| file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deberta_v2/modeling_deberta_v2.py
| class_name: transformers.models.deberta_v2.modeling_deberta_v2.ContextPooler
| human_written_code:
from ...activations import ACT2FN
from torch import nn
class ContextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
self.dropout = nn.Dropout(config.pooler_dropout)
self.config = config
def forward(self, hidden_states):
context_token = hidden_states[:, 0]
context_token = self.dropout(context_token)
pooled_output = self.dense(context_token)
pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
return pooled_output
@property
def output_dim(self):
return self.config.hidden_size
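`ContextPooler.forward` pools a sequence by taking the first ([CLS]) token's hidden state, then applying dropout, a dense projection, and the configured activation. A minimal standalone sketch of driving it directly (in the library it is instantiated by the classification models; the shapes here are illustrative):
```python
import torch
from transformers import DebertaV2Config
from transformers.models.deberta_v2.modeling_deberta_v2 import ContextPooler

config = DebertaV2Config()  # pooler_hidden_size defaults to hidden_size (1536)
pooler = ContextPooler(config).eval()

# (batch, seq_len, hidden_size) -> (batch, pooler_hidden_size)
hidden_states = torch.randn(2, 8, config.hidden_size)
with torch.no_grad():
    pooled = pooler(hidden_states)
print(pooled.shape)  # torch.Size([2, 1536])
```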
| class_skeleton:
class ContextPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
@property
def output_dim(self):
pass
| total_program_units: 5
| total_doc_str: 0
| AvgCountLine: 5
| AvgCountLineBlank: 0
| AvgCountLineCode: 4
| AvgCountLineComment: 1
| AvgCyclomatic: 1
| CommentToCodeRatio: 0.13
| CountClassBase: 1
| CountClassCoupled: 1
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 3
| CountDeclInstanceVariable: 3
| CountDeclMethod: 3
| CountDeclMethodAll: 13
| CountLine: 20
| CountLineBlank: 3
| CountLineCode: 15
| CountLineCodeDecl: 10
| CountLineCodeExe: 10
| CountLineComment: 2
| CountStmt: 14
| CountStmtDecl: 9
| CountStmtExe: 10
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 1