Dataset schema (35 columns). For string columns, min and max are string lengths; for numeric columns they are value ranges.

column                     dtype    min  max
id                         int64    0    328k
repository_name            string   7    58
file_path                  string   9    302
class_name                 string   5    256
human_written_code         string   16   2.16M
class_skeleton             string   18   1.49M
total_program_units        int64    1    1.76k
total_doc_str              int64    0    771
AvgCountLine               float64  0    7.89k
AvgCountLineBlank          float64  0    297
AvgCountLineCode           float64  0    7.89k
AvgCountLineComment        float64  0    7.89k
AvgCyclomatic              float64  0    130
CommentToCodeRatio         float64  0    168
CountClassBase             float64  0    40
CountClassCoupled          float64  0    583
CountClassCoupledModified  float64  0    575
CountClassDerived          float64  0    5.35k
CountDeclInstanceMethod    float64  0    529
CountDeclInstanceVariable  float64  0    296
CountDeclMethod            float64  0    599
CountDeclMethodAll         float64  0    1.12k
CountLine                  float64  1    40.4k
CountLineBlank             float64  0    8.16k
CountLineCode              float64  1    25.7k
CountLineCodeDecl          float64  1    8.15k
CountLineCodeExe           float64  0    24.2k
CountLineComment           float64  0    16.5k
CountStmt                  float64  1    9.71k
CountStmtDecl              float64  1    8.15k
CountStmtExe               float64  0    9.69k
MaxCyclomatic              float64  0    759
MaxInheritanceTree         float64  0    16
MaxNesting                 float64  0    34
SumCyclomatic              float64  0    2.9k

Sample rows (ids 1,900 through 1,916) follow, with fields labeled by column name. In each row, the unlabeled block after human_written_code is the class_skeleton, and the numeric columns are grouped under "metrics".
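For orientation, here is a minimal sketch of pulling rows like these with the `datasets` library. The dataset identifier "org/dataset-name" and the split name are placeholders, since this excerpt does not name the dataset:

```python
# A minimal sketch, assuming the schema above describes a Hugging Face dataset.
# "org/dataset-name" is a placeholder; this excerpt does not give the real ID.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")

row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:200])  # full source of the class
print(row["class_skeleton"][:200])      # same class with bodies stubbed to `pass`
print(row["CountLineCode"], row["CountLineComment"], row["CommentToCodeRatio"])
```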
id: 1,900
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertForSequenceClassification
human_written_code:
from typing import Optional, Union from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn import torch @add_start_docstrings('\n Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', QDQBERT_START_DOCSTRING) class QDQBertForSequenceClassification(QDQBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = QDQBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', QDQBERT_START_DOCSTRING) class QDQBertForSequenceClassification(QDQBertPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
metrics:
  total_program_units: 6, total_doc_str: 1
  AvgCountLine: 40, AvgCountLineBlank: 4, AvgCountLineCode: 33, AvgCountLineComment: 4
  AvgCyclomatic: 7, CommentToCodeRatio: 0.1
  CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 2, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 5, CountDeclMethod: 2, CountDeclMethodAll: 132
  CountLine: 88, CountLineBlank: 8, CountLineCode: 73, CountLineCodeDecl: 27, CountLineCodeExe: 52, CountLineComment: 7
  CountStmt: 35, CountStmtDecl: 14, CountStmtExe: 32
  MaxCyclomatic: 12, MaxInheritanceTree: 3, MaxNesting: 3, SumCyclomatic: 13
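In the rows shown here, CommentToCodeRatio is consistent with CountLineComment divided by CountLineCode, rounded to two decimals. A quick check against the row above (an observation about this excerpt, not documented behavior of the dataset's metric extractor):

```python
# Row 1,900 above: 7 comment lines over 73 code lines.
print(round(7 / 73, 2))  # 0.1, matching CommentToCodeRatio
```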
id: 1,901
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertForTokenClassification
human_written_code:
from torch import nn from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from typing import Optional, Union from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends import torch @add_start_docstrings('\n QDQBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', QDQBERT_START_DOCSTRING) class QDQBertForTokenClassification(QDQBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = QDQBertModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('\n QDQBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', QDQBERT_START_DOCSTRING) class QDQBertForTokenClassification(QDQBertPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
metrics:
  total_program_units: 6, total_doc_str: 1
  AvgCountLine: 31, AvgCountLineBlank: 4, AvgCountLineCode: 24, AvgCountLineComment: 3
  AvgCyclomatic: 3, CommentToCodeRatio: 0.09
  CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 2, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 132
  CountLine: 69, CountLineBlank: 9, CountLineCode: 55, CountLineCodeDecl: 26, CountLineCodeExe: 34, CountLineComment: 5
  CountStmt: 22, CountStmtDecl: 13, CountStmtExe: 19
  MaxCyclomatic: 5, MaxInheritanceTree: 3, MaxNesting: 1, SumCyclomatic: 6
id: 1,902
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertIntermediate
human_written_code:
from torch import nn from ....activations import ACT2FN class QDQBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = quant_nn.QuantLinear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class QDQBertIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states): pass
metrics:
  total_program_units: 3, total_doc_str: 0
  AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 6, AvgCountLineComment: 1
  AvgCyclomatic: 2, CommentToCodeRatio: 0.08
  CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
  CountLine: 14, CountLineBlank: 1, CountLineCode: 12, CountLineCodeDecl: 5, CountLineCodeExe: 9, CountLineComment: 1
  CountStmt: 11, CountStmtDecl: 5, CountStmtExe: 8
  MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 1,903
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertLMHeadModel
human_written_code:
import torch from typing import Optional, Union from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput @add_start_docstrings('QDQBERT Model with a `language modeling` head on top for CLM fine-tuning.', QDQBERT_START_DOCSTRING) class QDQBertLMHeadModel(QDQBertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.weight', 'predictions.decoder.bias'] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `QDQBertLMHeadModel` as a standalone, add `is_decoder=True.`') self.bert = QDQBertModel(config, add_pooling_layer=False) self.cls = QDQBertOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[tuple[tuple[torch.LongTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: """ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Returns: Example: ```python >>> from transformers import AutoTokenizer, QDQBertLMHeadModel, QDQBertConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") >>> config = QDQBertConfig.from_pretrained("google-bert/bert-base-cased") >>> config.is_decoder = True >>> model = QDQBertLMHeadModel.from_pretrained("google-bert/bert-base-cased", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids: Optional[torch.LongTensor], past_key_values=None, attention_mask: Optional[torch.Tensor]=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: past_length = past_key_values.get_seq_length() if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, 
beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past
@add_start_docstrings('QDQBERT Model with a `language modeling` head on top for CLM fine-tuning.', QDQBERT_START_DOCSTRING) class QDQBertLMHeadModel(QDQBertPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[tuple[tuple[torch.LongTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, CausalLMOutputWithCrossAttentions]: ''' encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
Returns: Example: ```python >>> from transformers import AutoTokenizer, QDQBertLMHeadModel, QDQBertConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") >>> config = QDQBertConfig.from_pretrained("google-bert/bert-base-cased") >>> config.is_decoder = True >>> model = QDQBertLMHeadModel.from_pretrained("google-bert/bert-base-cased", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```''' pass def prepare_inputs_for_generation(self, input_ids: Optional[torch.LongTensor], past_key_values=None, attention_mask: Optional[torch.Tensor]=None, **model_kwargs): pass def _reorder_cache(self, past_key_values, beam_idx): pass
metrics:
  total_program_units: 10, total_doc_str: 1
  AvgCountLine: 25, AvgCountLineBlank: 3, AvgCountLineCode: 15, AvgCountLineComment: 7
  AvgCyclomatic: 3, CommentToCodeRatio: 0.42
  CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 3, CountClassDerived: 0
  CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 2, CountDeclMethod: 6, CountDeclMethodAll: 136
  CountLine: 161, CountLineBlank: 26, CountLineCode: 95, CountLineCodeDecl: 45, CountLineCodeExe: 64, CountLineComment: 40
  CountStmt: 47, CountStmtDecl: 22, CountStmtExe: 40
  MaxCyclomatic: 6, MaxInheritanceTree: 3, MaxNesting: 2, SumCyclomatic: 16
id: 1,904
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertLMPredictionHead
human_written_code:
from torch import nn import torch class QDQBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = QDQBertPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
class QDQBertLMPredictionHead(nn.Module): def __init__(self, config): pass def _tie_weights(self): pass def forward(self, hidden_states): pass
metrics:
  total_program_units: 4, total_doc_str: 0
  AvgCountLine: 6, AvgCountLineBlank: 1, AvgCountLineCode: 4, AvgCountLineComment: 1
  AvgCyclomatic: 1, CommentToCodeRatio: 0.23
  CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 1, CountClassDerived: 0
  CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 3, CountDeclMethod: 3, CountDeclMethodAll: 13
  CountLine: 21, CountLineBlank: 5, CountLineCode: 13, CountLineCodeDecl: 7, CountLineCodeExe: 9, CountLineComment: 3
  CountStmt: 13, CountStmtDecl: 7, CountStmtExe: 9
  MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 3
id: 1,905
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertLayer
human_written_code:
from ....utils.deprecation import deprecate_kwarg from ....modeling_layers import GradientCheckpointingLayer class QDQBertLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.seq_len_dim = 1 self.attention = QDQBertAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = QDQBertAttention(config) self.intermediate = QDQBertIntermediate(config) self.output = QDQBertOutput(config) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, output_attentions=False): self_attn_past_key_value = past_key_values[:2] if past_key_values is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_values=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_values[-2:] if past_key_values is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = self.feed_forward_chunk(attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class QDQBertLayer(GradientCheckpointingLayer): def __init__(self, config): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, output_attentions=False): pass def feed_forward_chunk(self, attention_output): pass
metrics:
  total_program_units: 5, total_doc_str: 0
  AvgCountLine: 26, AvgCountLineBlank: 2, AvgCountLineCode: 22, AvgCountLineComment: 2
  AvgCyclomatic: 4, CommentToCodeRatio: 0.1
  CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 3, CountClassDerived: 0
  CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 7, CountDeclMethod: 3, CountDeclMethodAll: 13
  CountLine: 81, CountLineBlank: 9, CountLineCode: 67, CountLineCodeDecl: 31, CountLineCodeExe: 54, CountLineComment: 7
  CountStmt: 40, CountStmtDecl: 22, CountStmtExe: 36
  MaxCyclomatic: 7, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 11
id: 1,906
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertModel
human_written_code:
from typing import Optional, Union from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from ....cache_utils import Cache @add_start_docstrings('The bare QDQBERT Model transformer outputting raw hidden-states without any specific head on top.', QDQBERT_START_DOCSTRING) class QDQBertModel(QDQBertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer: bool=True): requires_backends(self, 'pytorch_quantization') super().__init__(config) self.config = config self.embeddings = QDQBertEmbeddings(config) self.encoder = QDQBertEncoder(config) self.pooler = QDQBertPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune: dict[int, list[int]]): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndCrossAttentions]: """ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. 
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = 
self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
@add_start_docstrings('The bare QDQBERT Model transformer outputting raw hidden-states without any specific head on top.', QDQBERT_START_DOCSTRING) class QDQBertModel(QDQBertPreTrainedModel): ''' The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. ''' def __init__(self, config, add_pooling_layer: bool=True): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _prune_heads(self, heads_to_prune: dict[int, list[int]]): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndCrossAttentions]: ''' encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). ''' pass
metrics:
  total_program_units: 9, total_doc_str: 3
  AvgCountLine: 31, AvgCountLineBlank: 3, AvgCountLineCode: 21, AvgCountLineComment: 7
  AvgCyclomatic: 5, CommentToCodeRatio: 0.38
  CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 4, CountClassDerived: 0
  CountDeclInstanceMethod: 5, CountDeclInstanceVariable: 4, CountDeclMethod: 5, CountDeclMethodAll: 135
  CountLine: 176, CountLineBlank: 24, CountLineCode: 110, CountLineCodeDecl: 41, CountLineCodeExe: 83, CountLineComment: 42
  CountStmt: 57, CountStmtDecl: 25, CountStmtExe: 51
  MaxCyclomatic: 18, MaxInheritanceTree: 3, MaxNesting: 2, SumCyclomatic: 24
id: 1,907
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertOnlyMLMHead
human_written_code:
from torch import nn class QDQBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = QDQBertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores
class QDQBertOnlyMLMHead(nn.Module): def __init__(self, config): pass def forward(self, sequence_output): pass
metrics:
  total_program_units: 3, total_doc_str: 0
  AvgCountLine: 3, AvgCountLineBlank: 0, AvgCountLineCode: 3, AvgCountLineComment: 0
  AvgCyclomatic: 1, CommentToCodeRatio: 0
  CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 1, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 1, CountDeclMethod: 2, CountDeclMethodAll: 12
  CountLine: 8, CountLineBlank: 1, CountLineCode: 7, CountLineCodeDecl: 5, CountLineCodeExe: 4, CountLineComment: 0
  CountStmt: 7, CountStmtDecl: 5, CountStmtExe: 4
  MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,908
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertOnlyNSPHead
human_written_code:
from torch import nn class QDQBertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score
class QDQBertOnlyNSPHead(nn.Module): def __init__(self, config): pass def forward(self, pooled_output): pass
metrics:
  total_program_units: 3, total_doc_str: 0
  AvgCountLine: 3, AvgCountLineBlank: 0, AvgCountLineCode: 3, AvgCountLineComment: 0
  AvgCyclomatic: 1, CommentToCodeRatio: 0
  CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 1, CountDeclMethod: 2, CountDeclMethodAll: 12
  CountLine: 8, CountLineBlank: 1, CountLineCode: 7, CountLineCodeDecl: 5, CountLineCodeExe: 4, CountLineComment: 0
  CountStmt: 7, CountStmtDecl: 5, CountStmtExe: 4
  MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,909
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertOutput
human_written_code:
from torch import nn class QDQBertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = quant_nn.QuantLinear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) add_local = self.add_local_input_quantizer(hidden_states) add_residual = self.add_residual_input_quantizer(input_tensor) hidden_states = self.LayerNorm(add_local + add_residual) return hidden_states
class QDQBertOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states, input_tensor): pass
metrics:
  total_program_units: 3, total_doc_str: 0
  AvgCountLine: 9, AvgCountLineBlank: 1, AvgCountLineCode: 7, AvgCountLineComment: 2
  AvgCyclomatic: 1, CommentToCodeRatio: 0.2
  CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 5, CountDeclMethod: 2, CountDeclMethodAll: 12
  CountLine: 20, CountLineBlank: 2, CountLineCode: 15, CountLineCodeDecl: 10, CountLineCodeExe: 12, CountLineComment: 3
  CountStmt: 15, CountStmtDecl: 10, CountStmtExe: 12
  MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,910
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertPooler
human_written_code:
from torch import nn import torch class QDQBertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
class QDQBertPooler(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
metrics:
  total_program_units: 3, total_doc_str: 0
  AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 5, AvgCountLineComment: 1
  AvgCyclomatic: 1, CommentToCodeRatio: 0.2
  CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
  CountLine: 13, CountLineBlank: 1, CountLineCode: 10, CountLineCodeDecl: 7, CountLineCodeExe: 7, CountLineComment: 2
  CountStmt: 10, CountStmtDecl: 7, CountStmtExe: 7
  MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,911
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertPreTrainedModel
human_written_code:
from torch import nn from ....modeling_utils import PreTrainedModel from .configuration_qdqbert import QDQBertConfig class QDQBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: QDQBertConfig base_model_prefix = 'bert' supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class QDQBertPreTrainedModel(PreTrainedModel): ''' An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. ''' def _init_weights(self, module): '''Initialize the weights''' pass
metrics:
  total_program_units: 2, total_doc_str: 2
  AvgCountLine: 15, AvgCountLineBlank: 0, AvgCountLineCode: 12, AvgCountLineComment: 3
  AvgCyclomatic: 6, CommentToCodeRatio: 0.41
  CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 8
  CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 0, CountDeclMethod: 1, CountDeclMethodAll: 130
  CountLine: 26, CountLineBlank: 2, CountLineCode: 17, CountLineCodeDecl: 6, CountLineCodeExe: 15, CountLineComment: 7
  CountStmt: 15, CountStmtDecl: 6, CountStmtExe: 13
  MaxCyclomatic: 6, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 6
id: 1,912
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertPreTrainingHeads
human_written_code:
from torch import nn class QDQBertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = QDQBertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return (prediction_scores, seq_relationship_score)
class QDQBertPreTrainingHeads(nn.Module): def __init__(self, config): pass def forward(self, sequence_output, pooled_output): pass
metrics:
  total_program_units: 3, total_doc_str: 0
  AvgCountLine: 4, AvgCountLineBlank: 0, AvgCountLineCode: 4, AvgCountLineComment: 0
  AvgCyclomatic: 1, CommentToCodeRatio: 0
  CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 1, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
  CountLine: 10, CountLineBlank: 1, CountLineCode: 9, CountLineCodeDecl: 7, CountLineCodeExe: 6, CountLineComment: 0
  CountStmt: 9, CountStmtDecl: 7, CountStmtExe: 6
  MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,913
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertPredictionHeadTransform
human_written_code:
from ....activations import ACT2FN from torch import nn import torch class QDQBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class QDQBertPredictionHeadTransform(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
metrics:
  total_program_units: 3, total_doc_str: 0
  AvgCountLine: 7, AvgCountLineBlank: 0, AvgCountLineCode: 7, AvgCountLineComment: 0
  AvgCyclomatic: 2, CommentToCodeRatio: 0
  CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
  CountLine: 15, CountLineBlank: 1, CountLineCode: 14, CountLineCodeDecl: 6, CountLineCodeExe: 11, CountLineComment: 0
  CountStmt: 13, CountStmtDecl: 6, CountStmtExe: 10
  MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 1,914
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertSelfAttention
human_written_code:
from ....utils.deprecation import deprecate_kwarg import math import torch from torch import nn class QDQBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) self.key = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) self.value = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder self.matmul_q_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.matmul_k_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.matmul_v_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.matmul_a_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_values is not None: key_layer = past_key_values[0] value_layer = past_key_values[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_values is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_values[0], key_layer], dim=2) value_layer = torch.cat([past_key_values[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_values = (key_layer, value_layer) attention_scores = torch.matmul(self.matmul_q_input_quantizer(query_layer), self.matmul_k_input_quantizer(key_layer.transpose(-1, -2))) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = 
torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(self.matmul_a_input_quantizer(attention_probs), self.matmul_v_input_quantizer(value_layer)) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_values,) return outputs
class QDQBertSelfAttention(nn.Module): def __init__(self, config): pass def transpose_for_scores(self, x): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, output_attentions=False): pass
metrics:
  total_program_units: 5, total_doc_str: 0
  AvgCountLine: 43, AvgCountLineBlank: 7, AvgCountLineCode: 31, AvgCountLineComment: 6
  AvgCyclomatic: 5, CommentToCodeRatio: 0.19
  CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
  CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 15, CountDeclMethod: 3, CountDeclMethodAll: 13
  CountLine: 132, CountLineBlank: 22, CountLineCode: 93, CountLineCodeDecl: 47, CountLineCodeExe: 80, CountLineComment: 18
  CountStmt: 73, CountStmtDecl: 38, CountStmtExe: 69
  MaxCyclomatic: 12, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 16
id: 1,915
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py
class_name: transformers.models.deprecated.qdqbert.modeling_qdqbert.QDQBertSelfOutput
human_written_code:
from torch import nn class QDQBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = quant_nn.QuantLinear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) add_local = self.add_local_input_quantizer(hidden_states) add_residual = self.add_residual_input_quantizer(input_tensor) hidden_states = self.LayerNorm(add_local + add_residual) return hidden_states
class QDQBertSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states, input_tensor): pass
metrics:
  total_program_units: 3, total_doc_str: 0
  AvgCountLine: 10, AvgCountLineBlank: 1, AvgCountLineCode: 7, AvgCountLineComment: 2
  AvgCyclomatic: 1, CommentToCodeRatio: 0.2
  CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
  CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 5, CountDeclMethod: 2, CountDeclMethodAll: 12
  CountLine: 21, CountLineBlank: 3, CountLineCode: 15, CountLineCodeDecl: 10, CountLineCodeExe: 12, CountLineComment: 3
  CountStmt: 15, CountStmtDecl: 10, CountStmtExe: 12
  MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,916
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/configuration_realm.py
class_name: transformers.models.deprecated.realm.configuration_realm.RealmConfig
human_written_code:
from ....configuration_utils import PretrainedConfig class RealmConfig(PretrainedConfig): """ This is the configuration class to store the configuration of 1. [`RealmEmbedder`] 2. [`RealmScorer`] 3. [`RealmKnowledgeAugEncoder`] 4. [`RealmRetriever`] 5. [`RealmReader`] 6. [`RealmForOpenQA`] It is used to instantiate an REALM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the REALM [google/realm-cc-news-pretrained-embedder](https://huggingface.co/google/realm-cc-news-pretrained-embedder) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the REALM model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RealmEmbedder`], [`RealmScorer`], [`RealmKnowledgeAugEncoder`], or [`RealmReader`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. retriever_proj_size (`int`, *optional*, defaults to 128): Dimension of the retriever(embedder) projection. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_candidates (`int`, *optional*, defaults to 8): Number of candidates inputted to the RealmScorer or RealmKnowledgeAugEncoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RealmEmbedder`], [`RealmScorer`], [`RealmKnowledgeAugEncoder`], or [`RealmReader`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. span_hidden_size (`int`, *optional*, defaults to 256): Dimension of the reader's spans. max_span_width (`int`, *optional*, defaults to 10): Max span width of the reader. reader_layer_norm_eps (`float`, *optional*, defaults to 1e-3): The epsilon used by the reader's layer normalization layers. reader_beam_size (`int`, *optional*, defaults to 5): Beam size of the reader. reader_seq_len (`int`, *optional*, defaults to 288+32): Maximum sequence length of the reader. num_block_records (`int`, *optional*, defaults to 13353718): Number of block records. 
searcher_beam_size (`int`, *optional*, defaults to 5000): Beam size of the searcher. Note that when eval mode is enabled, *searcher_beam_size* will be the same as *reader_beam_size*. Example: ```python >>> from transformers import RealmConfig, RealmEmbedder >>> # Initializing a REALM realm-cc-news-pretrained-* style configuration >>> configuration = RealmConfig() >>> # Initializing a model (with random weights) from the google/realm-cc-news-pretrained-embedder style configuration >>> model = RealmEmbedder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'realm' def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=0.001, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.retriever_proj_size = retriever_proj_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_candidates = num_candidates self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.span_hidden_size = span_hidden_size self.max_span_width = max_span_width self.reader_layer_norm_eps = reader_layer_norm_eps self.reader_beam_size = reader_beam_size self.reader_seq_len = reader_seq_len self.num_block_records = num_block_records self.searcher_beam_size = searcher_beam_size
class RealmConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of 1. [`RealmEmbedder`] 2. [`RealmScorer`] 3. [`RealmKnowledgeAugEncoder`] 4. [`RealmRetriever`] 5. [`RealmReader`] 6. [`RealmForOpenQA`] It is used to instantiate an REALM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the REALM [google/realm-cc-news-pretrained-embedder](https://huggingface.co/google/realm-cc-news-pretrained-embedder) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the REALM model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RealmEmbedder`], [`RealmScorer`], [`RealmKnowledgeAugEncoder`], or [`RealmReader`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. retriever_proj_size (`int`, *optional*, defaults to 128): Dimension of the retriever(embedder) projection. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_candidates (`int`, *optional*, defaults to 8): Number of candidates inputted to the RealmScorer or RealmKnowledgeAugEncoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RealmEmbedder`], [`RealmScorer`], [`RealmKnowledgeAugEncoder`], or [`RealmReader`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. span_hidden_size (`int`, *optional*, defaults to 256): Dimension of the reader's spans. max_span_width (`int`, *optional*, defaults to 10): Max span width of the reader. reader_layer_norm_eps (`float`, *optional*, defaults to 1e-3): The epsilon used by the reader's layer normalization layers. reader_beam_size (`int`, *optional*, defaults to 5): Beam size of the reader. reader_seq_len (`int`, *optional*, defaults to 288+32): Maximum sequence length of the reader. num_block_records (`int`, *optional*, defaults to 13353718): Number of block records. 
searcher_beam_size (`int`, *optional*, defaults to 5000): Beam size of the searcher. Note that when eval mode is enabled, *searcher_beam_size* will be the same as *reader_beam_size*. Example: ```python >>> from transformers import RealmConfig, RealmEmbedder >>> # Initializing a REALM realm-cc-news-pretrained-* style configuration >>> configuration = RealmConfig() >>> # Initializing a model (with random weights) from the google/realm-cc-news-pretrained-embedder style configuration >>> model = RealmEmbedder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=0.001, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): pass
2
1
56
3
50
4
1
1.48
1
1
0
0
1
21
1
33
143
15
52
51
23
77
25
24
23
1
2
0
1
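To make the `RealmConfig` record above concrete, here is a minimal usage sketch. It assumes a transformers release that still ships the (deprecated) REALM model, as in the record's own docstring example; the non-default values below are purely illustrative.

```python
from transformers import RealmConfig

# Override a few retrieval-related hyperparameters; every name below comes
# from the Args list in the docstring, the values are illustrative only.
config = RealmConfig(
    num_candidates=4,         # candidates fed to RealmScorer / RealmKnowledgeAugEncoder
    searcher_beam_size=1000,  # searcher beam size (used in training mode)
    reader_beam_size=5,       # reader beam size; the searcher falls back to this in eval mode
)
print(config.num_candidates, config.searcher_beam_size, config.reader_beam_size)
```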
1,917
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmAttention
from ....cache_utils import Cache from typing import Optional, Union from ....utils.deprecation import deprecate_kwarg from torch import nn import torch from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer class RealmAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = REALM_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = RealmSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class RealmAttention(nn.Module): def __init__(self, config, position_embedding_type=None): pass def prune_heads(self, heads): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass
5
0
15
1
14
1
1
0.07
1
5
1
0
3
3
3
13
49
4
43
20
30
3
22
11
18
2
1
1
4
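A small, self-contained sketch of the head-pruning mechanics that `RealmAttention.prune_heads` builds on, using the same `transformers.pytorch_utils` helpers the class imports. The toy sizes are assumptions for illustration.

```python
import torch
from torch import nn
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer

num_heads, head_size = 4, 16
hidden = num_heads * head_size
query = nn.Linear(hidden, hidden)

# Drop heads 0 and 2; `index` selects the rows belonging to the surviving heads.
heads, index = find_pruneable_heads_and_indices({0, 2}, num_heads, head_size, set())
query = prune_linear_layer(query, index)  # default dim=0 prunes output features
print(query.weight.shape)                 # torch.Size([32, 64]): 2 of 4 heads remain
```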
1,918
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmBertModel
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput import torch class RealmBertModel(RealmPreTrainedModel): """ Same as the original BertModel but remove docstrings. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = RealmEmbeddings(config) self.encoder = RealmEncoder(config) self.pooler = RealmPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, 
inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
class RealmBertModel(RealmPreTrainedModel): ''' Same as the original BertModel but remove docstrings. ''' def __init__(self, config, add_pooling_layer=True): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): pass
6
2
26
3
20
3
5
0.19
1
7
4
0
5
4
5
136
141
20
102
40
81
19
55
25
49
18
3
2
24
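The `RealmBertModel.forward` above turns the 2-D padding mask into an "extended" additive mask before calling the encoder. The snippet below recreates that idea in isolation; it mirrors the convention, not the exact `get_extended_attention_mask` helper.

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])               # 1 = real token, 0 = padding
extended = attention_mask[:, None, None, :].to(torch.float32)  # (batch, 1, 1, seq)
extended = (1.0 - extended) * torch.finfo(torch.float32).min   # 0 where attended, ~-inf where masked
print(extended.shape)  # torch.Size([1, 1, 1, 5])
```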
1,919
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmEmbedder
import torch from typing import Optional, Union from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings @add_start_docstrings('The embedder of REALM outputting projected score that will be used to calculate relevance score.', REALM_START_DOCSTRING) class RealmEmbedder(RealmPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.bias'] def __init__(self, config): super().__init__(config) self.realm = RealmBertModel(self.config) self.cls = RealmScorerProjection(self.config) self.post_init() def get_input_embeddings(self): return self.realm.embeddings.word_embeddings def set_input_embeddings(self, value): self.realm.embeddings.word_embeddings = value @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=RealmEmbedderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, RealmEmbedderOutput]: """ Returns: Example: ```python >>> from transformers import AutoTokenizer, RealmEmbedder >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-embedder") >>> model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> projected_score = outputs.projected_score ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict realm_outputs = self.realm(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooler_output = realm_outputs[1] projected_score = self.cls(pooler_output) if not return_dict: return (projected_score,) + realm_outputs[2:4] else: return RealmEmbedderOutput(projected_score=projected_score, hidden_states=realm_outputs.hidden_states, attentions=realm_outputs.attentions)
@add_start_docstrings('The embedder of REALM outputting projected score that will be used to calculate relevance score.', REALM_START_DOCSTRING) class RealmEmbedder(RealmPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=RealmEmbedderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, RealmEmbedderOutput]: ''' Returns: Example: ```python >>> from transformers import AutoTokenizer, RealmEmbedder >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-embedder") >>> model = RealmEmbedder.from_pretrained("google/realm-cc-news-pretrained-embedder") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> projected_score = outputs.projected_score ``` ''' pass
8
1
17
3
11
4
2
0.32
1
5
3
0
4
2
4
135
76
14
47
23
29
15
19
11
14
3
3
1
6
1,920
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmEmbedderOutput
from dataclasses import dataclass import torch from typing import Optional, Union from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput @dataclass class RealmEmbedderOutput(ModelOutput): """ Outputs of [`RealmEmbedder`] models. Args: projected_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`): Projected score. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ projected_score: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass class RealmEmbedderOutput(ModelOutput): ''' Outputs of [`RealmEmbedder`] models. Args: projected_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`): Projected score. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
3.75
1
0
0
0
0
0
0
0
24
5
4
4
3
15
4
4
3
0
1
0
0
1,921
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmEmbeddings
from typing import Optional, Union import torch from torch import nn class RealmEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class RealmEmbeddings(nn.Module): '''Construct the embeddings from word, position and token_type embeddings.''' def __init__(self, config): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: pass
3
1
29
3
23
3
4
0.15
1
3
0
0
2
6
2
12
62
8
47
23
37
7
34
16
31
7
1
2
8
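A toy recreation of the `RealmEmbeddings` forward pass: word, token-type and (absolute) position embeddings are summed, then LayerNorm and dropout are applied. The sizes are illustrative, not the REALM defaults.

```python
import torch
from torch import nn

vocab, max_pos, n_types, hidden = 100, 32, 2, 8
word_emb = nn.Embedding(vocab, hidden)
pos_emb = nn.Embedding(max_pos, hidden)
type_emb = nn.Embedding(n_types, hidden)
norm, drop = nn.LayerNorm(hidden), nn.Dropout(0.1)

input_ids = torch.randint(0, vocab, (1, 5))
position_ids = torch.arange(5).unsqueeze(0)
token_type_ids = torch.zeros(1, 5, dtype=torch.long)

# Sum the three embedding tables, then normalize and regularize.
embeddings = word_emb(input_ids) + type_emb(token_type_ids) + pos_emb(position_ids)
embeddings = drop(norm(embeddings))
print(embeddings.shape)  # torch.Size([1, 5, 8])
```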
1,922
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmEncoder
from ....cache_utils import Cache from typing import Optional, Union from torch import nn from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput import torch class RealmEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([RealmLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values[i] if past_key_values is not None else None, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
class RealmEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: pass
3
0
45
4
41
0
9
0
1
8
2
0
2
3
2
12
91
8
83
26
68
0
35
14
32
17
1
3
18
1,923
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmForOpenQA
import torch from typing import Optional, Union from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings @add_start_docstrings('`RealmForOpenQA` for end-to-end open domain question answering.', REALM_START_DOCSTRING) class RealmForOpenQA(RealmPreTrainedModel): def __init__(self, config, retriever=None): super().__init__(config) self.embedder = RealmEmbedder(config) self.reader = RealmReader(config) self.register_buffer('block_emb', torch.zeros(()).new_empty(size=(config.num_block_records, config.retriever_proj_size), dtype=torch.float32, device=torch.device('cpu'))) self.retriever = retriever self.post_init() @property def searcher_beam_size(self): if self.training: return self.config.searcher_beam_size return self.config.reader_beam_size def block_embedding_to(self, device): """Send `self.block_emb` to a specific device. Args: device (`str` or `torch.device`): The device to which `self.block_emb` will be sent. """ self.block_emb = self.block_emb.to(device) @add_start_docstrings_to_model_forward(REALM_FOR_OPEN_QA_DOCSTRING.format('1, sequence_length')) @replace_return_docstrings(output_type=RealmForOpenQAOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor], attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, answer_ids: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, RealmForOpenQAOutput]: """ Returns: Example: ```python >>> import torch >>> from transformers import RealmForOpenQA, RealmRetriever, AutoTokenizer >>> retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa") >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-orqa-nq-openqa") >>> model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa", retriever=retriever) >>> question = "Who is the pioneer in modern computer science?" >>> question_ids = tokenizer([question], return_tensors="pt") >>> answer_ids = tokenizer( ... ["alan mathison turing"], ... add_special_tokens=False, ... return_token_type_ids=False, ... return_attention_mask=False, ... 
).input_ids >>> reader_output, predicted_answer_ids = model(**question_ids, answer_ids=answer_ids, return_dict=False) >>> predicted_answer = tokenizer.decode(predicted_answer_ids) >>> loss = reader_output.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and input_ids.shape[0] != 1: raise ValueError('The batch_size of the inputs must be 1.') question_outputs = self.embedder(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, return_dict=True) question_projection = question_outputs[0] batch_scores = torch.einsum('BD,QD->QB', self.block_emb, question_projection.to(self.block_emb.device)) _, retrieved_block_ids = torch.topk(batch_scores, k=self.searcher_beam_size, dim=-1) retrieved_block_ids = retrieved_block_ids.squeeze() retrieved_block_emb = torch.index_select(self.block_emb, dim=0, index=retrieved_block_ids) has_answers, start_pos, end_pos, concat_inputs = self.retriever(retrieved_block_ids.cpu(), input_ids, answer_ids, max_length=self.config.reader_seq_len) concat_inputs = concat_inputs.to(self.reader.device) block_mask = concat_inputs.special_tokens_mask.type(torch.bool).to(device=self.reader.device) block_mask.logical_not_().logical_and_(concat_inputs.token_type_ids.type(torch.bool)) if has_answers is not None: has_answers = torch.tensor(has_answers, dtype=torch.bool, device=self.reader.device) start_pos = torch.tensor(start_pos, dtype=torch.long, device=self.reader.device) end_pos = torch.tensor(end_pos, dtype=torch.long, device=self.reader.device) retrieved_logits = torch.einsum('D,BD->B', question_projection.squeeze(), retrieved_block_emb.to(self.reader.device)) reader_output = self.reader(input_ids=concat_inputs.input_ids[0:self.config.reader_beam_size], attention_mask=concat_inputs.attention_mask[0:self.config.reader_beam_size], token_type_ids=concat_inputs.token_type_ids[0:self.config.reader_beam_size], relevance_score=retrieved_logits, block_mask=block_mask, has_answers=has_answers, start_positions=start_pos, end_positions=end_pos, return_dict=True) predicted_block = concat_inputs.input_ids[reader_output.block_idx] predicted_answer_ids = predicted_block[reader_output.start_pos:reader_output.end_pos + 1] if not return_dict: return (reader_output, predicted_answer_ids) return RealmForOpenQAOutput(reader_output=reader_output, predicted_answer_ids=predicted_answer_ids)
@add_start_docstrings('`RealmForOpenQA` for end-to-end open domain question answering.', REALM_START_DOCSTRING) class RealmForOpenQA(RealmPreTrainedModel): def __init__(self, config, retriever=None): pass @property def searcher_beam_size(self): pass def block_embedding_to(self, device): '''Send `self.block_emb` to a specific device. Args: device (`str` or `torch.device`): The device to which `self.block_emb` will be sent. ''' pass @add_start_docstrings_to_model_forward(REALM_FOR_OPEN_QA_DOCSTRING.format('1, sequence_length')) @replace_return_docstrings(output_type=RealmForOpenQAOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor], attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, answer_ids: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, RealmForOpenQAOutput]: ''' Returns: Example: ```python >>> import torch >>> from transformers import RealmForOpenQA, RealmRetriever, AutoTokenizer >>> retriever = RealmRetriever.from_pretrained("google/realm-orqa-nq-openqa") >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-orqa-nq-openqa") >>> model = RealmForOpenQA.from_pretrained("google/realm-orqa-nq-openqa", retriever=retriever) >>> question = "Who is the pioneer in modern computer science?" >>> question_ids = tokenizer([question], return_tensors="pt") >>> answer_ids = tokenizer( ... ["alan mathison turing"], ... add_special_tokens=False, ... return_token_type_ids=False, ... return_attention_mask=False, ... ).input_ids >>> reader_output, predicted_answer_ids = model(**question_ids, answer_ids=answer_ids, return_dict=False) >>> predicted_answer = tokenizer.decode(predicted_answer_ids) >>> loss = reader_output.loss ```''' pass
9
2
32
5
18
9
2
0.47
1
6
3
0
4
4
4
135
133
23
75
29
60
35
39
20
34
5
3
1
9
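The core retrieval step in `RealmForOpenQA.forward` is an inner-product search over the pre-computed `block_emb` buffer followed by a top-k. The sketch below isolates that step with random stand-in tensors; the sizes are invented for illustration.

```python
import torch

num_blocks, proj_size, beam_size = 1000, 128, 5
block_emb = torch.randn(num_blocks, proj_size)   # stands in for self.block_emb
question_projection = torch.randn(1, proj_size)  # stands in for the embedder output

# Score every block against the question, then keep the top-k block ids.
batch_scores = torch.einsum("BD,QD->QB", block_emb, question_projection)
_, retrieved_block_ids = torch.topk(batch_scores, k=beam_size, dim=-1)
retrieved_block_emb = torch.index_select(block_emb, 0, retrieved_block_ids.squeeze())
print(retrieved_block_ids.shape, retrieved_block_emb.shape)  # (1, 5) and (5, 128)
```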
1,924
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmForOpenQAOutput
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput from dataclasses import dataclass import torch from typing import Optional, Union @dataclass class RealmForOpenQAOutput(ModelOutput): """ Outputs of [`RealmForOpenQA`] models. Args: reader_output (`dict`): Reader output. predicted_answer_ids (`torch.LongTensor` of shape `(answer_sequence_length)`): Predicted answer ids. """ reader_output: Optional[dict] = None predicted_answer_ids: Optional[torch.LongTensor] = None
@dataclass class RealmForOpenQAOutput(ModelOutput): ''' Outputs of [`RealmForOpenQA`] models. Args: reader_output (`dict`): Reader output. predicted_answer_ids (`torch.LongTensor` of shape `(answer_sequence_length)`): Predicted answer ids. ''' pass
2
1
0
0
0
0
0
2.67
1
0
0
0
0
0
0
0
14
3
3
3
2
8
3
3
2
0
1
0
0
1,925
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmIntermediate
import torch from torch import nn from ....activations import ACT2FN class RealmIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class RealmIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
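`RealmIntermediate` resolves a string `hidden_act` through the `ACT2FN` table it imports; a quick look at that lookup, assuming the `transformers.activations` module and the REALM default `"gelu_new"`:

```python
import torch
from transformers.activations import ACT2FN

act_fn = ACT2FN["gelu_new"]  # the REALM default hidden_act
print(act_fn(torch.tensor([-1.0, 0.0, 1.0])))
```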
1,926
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmKnowledgeAugEncoder
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput import torch from typing import Optional, Union from torch.nn import CrossEntropyLoss @add_start_docstrings('The knowledge-augmented encoder of REALM outputting masked language model logits and marginal log-likelihood loss.', REALM_START_DOCSTRING) class RealmKnowledgeAugEncoder(RealmPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder'] def __init__(self, config): super().__init__(config) self.realm = RealmBertModel(self.config) self.cls = RealmOnlyMLMHead(self.config) self.post_init() def get_input_embeddings(self): return self.realm.embeddings.word_embeddings def set_input_embeddings(self, value): self.realm.embeddings.word_embeddings = value def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, num_candidates, sequence_length')) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, relevance_score: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, mlm_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]: """ relevance_score (`torch.FloatTensor` of shape `(batch_size, num_candidates)`, *optional*): Relevance score derived from RealmScorer, must be specified if you want to compute the masked language modeling loss. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` mlm_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid calculating joint loss on certain positions. If not specified, the loss will not be masked. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, RealmKnowledgeAugEncoder >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> model = RealmKnowledgeAugEncoder.from_pretrained( ... "google/realm-cc-news-pretrained-encoder", num_candidates=2 ... 
) >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> inputs = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and relevance_score is None: raise ValueError('You have to specify `relevance_score` when `labels` is specified in order to compute loss.') flattened_input_ids, flattened_attention_mask, flattened_token_type_ids = self._flatten_inputs(input_ids, attention_mask, token_type_ids) joint_outputs = self.realm(flattened_input_ids, attention_mask=flattened_attention_mask, token_type_ids=flattened_token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) joint_output = joint_outputs[0] prediction_scores = self.cls(joint_output) candidate_score = relevance_score masked_lm_loss = None if labels is not None: batch_size, seq_length = labels.size() if mlm_mask is None: mlm_mask = torch.ones_like(labels, dtype=torch.float32) else: mlm_mask = mlm_mask.type(torch.float32) loss_fct = CrossEntropyLoss(reduction='none') mlm_logits = prediction_scores.view(-1, self.config.vocab_size) mlm_targets = labels.tile(1, self.config.num_candidates).view(-1) masked_lm_log_prob = -loss_fct(mlm_logits, mlm_targets).view(batch_size, self.config.num_candidates, seq_length) candidate_log_prob = candidate_score.log_softmax(-1).unsqueeze(-1) joint_gold_log_prob = candidate_log_prob + masked_lm_log_prob marginal_gold_log_probs = joint_gold_log_prob.logsumexp(1) masked_lm_loss = -torch.nansum(torch.sum(marginal_gold_log_probs * mlm_mask) / torch.sum(mlm_mask)) if not return_dict: output = (prediction_scores,) + joint_outputs[2:4] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=joint_outputs.hidden_states, attentions=joint_outputs.attentions)
@add_start_docstrings('The knowledge-augmented encoder of REALM outputting masked language model logits and marginal log-likelihood loss.', REALM_START_DOCSTRING) class RealmKnowledgeAugEncoder(RealmPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, num_candidates, sequence_length')) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, relevance_score: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, mlm_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]: ''' relevance_score (`torch.FloatTensor` of shape `(batch_size, num_candidates)`, *optional*): Relevance score derived from RealmScorer, must be specified if you want to compute the masked language modeling loss. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` mlm_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid calculating joint loss on certain positions. If not specified, the loss will not be masked. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, RealmKnowledgeAugEncoder >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> model = RealmKnowledgeAugEncoder.from_pretrained( ... "google/realm-cc-news-pretrained-encoder", num_candidates=2 ... ) >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> inputs = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ```''' pass
10
1
23
3
13
7
2
0.48
1
6
3
0
6
2
6
137
147
25
83
42
58
40
43
25
36
7
3
2
12
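The loss branch of `RealmKnowledgeAugEncoder.forward` marginalizes per-candidate MLM log-probabilities over the candidate (relevance) distribution. The toy recreation below uses the same reshaping and `logsumexp`, with small invented sizes (and a plain sum in place of `torch.nansum`).

```python
import torch
from torch.nn import CrossEntropyLoss

batch, num_candidates, seq_len, vocab = 2, 3, 4, 11
prediction_scores = torch.randn(batch * num_candidates, seq_len, vocab)
labels = torch.randint(0, vocab, (batch, seq_len))
relevance_score = torch.randn(batch, num_candidates)
mlm_mask = torch.ones(batch, seq_len)

# Per-token log-likelihood of the gold label under each candidate.
loss_fct = CrossEntropyLoss(reduction="none")
mlm_logits = prediction_scores.view(-1, vocab)
mlm_targets = labels.tile(1, num_candidates).view(-1)
masked_lm_log_prob = -loss_fct(mlm_logits, mlm_targets).view(batch, num_candidates, seq_len)

candidate_log_prob = relevance_score.log_softmax(-1).unsqueeze(-1)  # (batch, cand, 1)
joint_gold_log_prob = candidate_log_prob + masked_lm_log_prob       # (batch, cand, seq)
marginal_gold_log_probs = joint_gold_log_prob.logsumexp(1)          # marginalize candidates
masked_lm_loss = -(marginal_gold_log_probs * mlm_mask).sum() / mlm_mask.sum()
print(masked_lm_loss)
```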
1,927
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmLMPredictionHead
import torch from torch import nn class RealmLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = RealmPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
class RealmLMPredictionHead(nn.Module): def __init__(self, config): pass def _tie_weights(self): pass def forward(self, hidden_states): pass
4
0
6
1
4
1
1
0.23
1
2
1
0
3
3
3
13
21
5
13
7
9
3
13
7
9
1
1
0
3
1,928
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmLayer
from typing import Optional, Union from ....cache_utils import Cache from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ....modeling_layers import GradientCheckpointingLayer import torch from ....utils.deprecation import deprecate_kwarg class RealmLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = RealmAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = RealmAttention(config, position_embedding_type='absolute') self.intermediate = RealmIntermediate(config) self.output = RealmOutput(config) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: self_attn_past_key_value = past_key_values[:2] if past_key_values is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_values=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_values[-2:] if past_key_values is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class RealmLayer(GradientCheckpointingLayer): def __init__(self, config): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass def feed_forward_chunk(self, attention_output): pass
5
0
27
2
23
2
4
0.1
1
7
3
0
3
8
3
13
84
9
70
32
57
7
41
23
37
7
1
2
11
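`RealmLayer` runs its feed-forward sublayer through `apply_chunking_to_forward`, which splits the chosen dimension into chunks and concatenates the results to save memory. The check below (with a stand-in feed-forward) shows the chunked call matches the unchunked one.

```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

def feed_forward_chunk(attention_output):
    return attention_output * 2.0  # stand-in for intermediate -> output

hidden_states = torch.randn(1, 8, 4)  # (batch, seq, hidden)
chunked = apply_chunking_to_forward(feed_forward_chunk, 2, 1, hidden_states)  # chunk_size=2 along seq
print(torch.allclose(chunked, feed_forward_chunk(hidden_states)))  # True
```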
1,929
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmOnlyMLMHead
from torch import nn class RealmOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = RealmLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores
class RealmOnlyMLMHead(nn.Module): def __init__(self, config): pass def forward(self, sequence_output): pass
3
0
3
0
3
0
1
0
1
2
1
0
2
1
2
12
8
1
7
5
4
0
7
5
4
1
1
0
2
1,930
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmOutput
import torch from torch import nn class RealmOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class RealmOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
1,931
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmPooler
import torch from torch import nn class RealmPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
class RealmPooler(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
5
1
1
0.2
1
2
0
0
2
2
2
12
13
1
10
7
7
2
10
7
7
1
1
0
2
1,932
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmPreTrainedModel
from torch import nn from ....modeling_utils import PreTrainedModel from .configuration_realm import RealmConfig class RealmPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: RealmConfig base_model_prefix = 'realm' def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _flatten_inputs(self, *inputs): """Flatten inputs' shape to (-1, input_shape[-1])""" flattened_inputs = [] for tensor in inputs: if tensor is None: flattened_inputs.append(None) else: input_shape = tensor.shape if len(input_shape) > 2: tensor = tensor.view((-1, input_shape[-1])) flattened_inputs.append(tensor) return flattened_inputs
class RealmPreTrainedModel(PreTrainedModel): ''' An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. ''' def _init_weights(self, module): '''Initialize the weights''' pass def _flatten_inputs(self, *inputs): '''Flatten inputs' shape to (-1, input_shape[-1])''' pass
3
3
14
0
12
2
5
0.3
1
0
0
6
2
0
2
131
38
3
27
9
24
8
24
9
21
6
2
3
10
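What `RealmPreTrainedModel._flatten_inputs` does, shown on one tensor: anything with more than two dimensions is reshaped to `(-1, last_dim)`, so a `(batch, num_candidates, seq_len)` candidate batch becomes an ordinary 2-D batch. Shapes here are illustrative.

```python
import torch

input_ids = torch.randint(0, 100, (2, 3, 10))  # (batch, num_candidates, seq_len)
input_shape = input_ids.shape
if len(input_shape) > 2:
    input_ids = input_ids.view((-1, input_shape[-1]))
print(input_ids.shape)  # torch.Size([6, 10])
```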
1,933
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmPredictionHeadTransform
from ....activations import ACT2FN from torch import nn class RealmPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class RealmPredictionHeadTransform(nn.Module): def __init__(self, config): pass def forward(self, hidden_states): pass
3
0
7
0
7
0
2
0
1
2
0
0
2
3
2
12
15
1
14
6
11
0
13
6
10
2
1
1
3
1,934
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmReader
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings import torch from typing import Optional, Union @add_start_docstrings('The reader of REALM.', REALM_START_DOCSTRING) class RealmReader(RealmPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.realm = RealmBertModel(config) self.cls = RealmOnlyMLMHead(config) self.qa_outputs = RealmReaderProjection(config) self.post_init() @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('reader_beam_size, sequence_length')) @replace_return_docstrings(output_type=RealmReaderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, relevance_score: Optional[torch.FloatTensor]=None, block_mask: Optional[torch.BoolTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, has_answers: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, RealmReaderOutput]: """ relevance_score (`torch.FloatTensor` of shape `(searcher_beam_size,)`, *optional*): Relevance score, which must be specified if you want to compute the logits and marginal log loss. block_mask (`torch.BoolTensor` of shape `(searcher_beam_size, sequence_length)`, *optional*): The mask of the evidence block, which must be specified if you want to compute the logits and marginal log loss. start_positions (`torch.LongTensor` of shape `(searcher_beam_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(searcher_beam_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. has_answers (`torch.BoolTensor` of shape `(searcher_beam_size,)`, *optional*): Whether or not the evidence block has answer(s). 
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if relevance_score is None: raise ValueError('You have to specify `relevance_score` to calculate logits and loss.') if block_mask is None: raise ValueError('You have to specify `block_mask` to separate question block and evidence block.') if token_type_ids.size(1) < self.config.max_span_width: raise ValueError('The input sequence length must be greater than or equal to config.max_span_width.') outputs = self.realm(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] reader_logits, candidate_starts, candidate_ends = self.qa_outputs(sequence_output, block_mask[0:self.config.reader_beam_size]) retriever_logits = torch.unsqueeze(relevance_score[0:self.config.reader_beam_size], -1) reader_logits += retriever_logits predicted_block_index = torch.argmax(torch.max(reader_logits, dim=1).values) predicted_candidate = torch.argmax(torch.max(reader_logits, dim=0).values) predicted_start = torch.index_select(candidate_starts, dim=0, index=predicted_candidate) predicted_end = torch.index_select(candidate_ends, dim=0, index=predicted_candidate) total_loss = None retriever_loss = None reader_loss = None retriever_correct = None reader_correct = None if start_positions is not None and end_positions is not None and (has_answers is not None): def compute_correct_candidates(candidate_starts, candidate_ends, gold_starts, gold_ends): """Compute correct span.""" is_gold_start = torch.eq(torch.unsqueeze(torch.unsqueeze(candidate_starts, 0), 0), torch.unsqueeze(gold_starts, -1)) is_gold_end = torch.eq(torch.unsqueeze(torch.unsqueeze(candidate_ends, 0), 0), torch.unsqueeze(gold_ends, -1)) return torch.any(torch.logical_and(is_gold_start, is_gold_end), 1) def marginal_log_loss(logits, is_correct): """Loss based on the negative marginal log-likelihood.""" def mask_to_score(mask, dtype=torch.float32): return (1.0 - mask.type(dtype)) * torch.finfo(dtype).min log_numerator = torch.logsumexp(logits + mask_to_score(is_correct, dtype=logits.dtype), dim=-1) log_denominator = torch.logsumexp(logits, dim=-1) return log_denominator - log_numerator ignored_index = sequence_output.size(1) start_positions = start_positions.clamp(-1, ignored_index) end_positions = end_positions.clamp(-1, ignored_index) retriever_correct = has_answers any_retriever_correct = torch.any(retriever_correct) reader_correct = compute_correct_candidates(candidate_starts=candidate_starts, candidate_ends=candidate_ends, gold_starts=start_positions[0:self.config.reader_beam_size], gold_ends=end_positions[0:self.config.reader_beam_size]) any_reader_correct = torch.any(reader_correct) retriever_loss = marginal_log_loss(relevance_score, retriever_correct) reader_loss = marginal_log_loss(reader_logits.view(-1), reader_correct.view(-1)) retriever_loss *= any_retriever_correct.type(torch.float32) reader_loss *= any_reader_correct.type(torch.float32) total_loss = (retriever_loss + reader_loss).mean() if not return_dict: output = (predicted_block_index, predicted_candidate, predicted_start, predicted_end) + outputs[2:] return (total_loss, retriever_loss, reader_loss, retriever_correct, reader_correct) + output if total_loss is not None else output return RealmReaderOutput(loss=total_loss, retriever_loss=retriever_loss, 
reader_loss=reader_loss, retriever_correct=retriever_correct, reader_correct=reader_correct, block_idx=predicted_block_index, candidate=predicted_candidate, start_pos=predicted_start, end_pos=predicted_end, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings('The reader of REALM.', REALM_START_DOCSTRING) class RealmReader(RealmPreTrainedModel): def __init__(self, config): pass @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('reader_beam_size, sequence_length')) @replace_return_docstrings(output_type=RealmReaderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, relevance_score: Optional[torch.FloatTensor]=None, block_mask: Optional[torch.BoolTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, has_answers: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, RealmReaderOutput]: ''' relevance_score (`torch.FloatTensor` of shape `(searcher_beam_size,)`, *optional*): Relevance score, which must be specified if you want to compute the logits and marginal log loss. block_mask (`torch.BoolTensor` of shape `(searcher_beam_size, sequence_length)`, *optional*): The mask of the evidence block, which must be specified if you want to compute the logits and marginal log loss. start_positions (`torch.LongTensor` of shape `(searcher_beam_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(searcher_beam_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. has_answers (`torch.BoolTensor` of shape `(searcher_beam_size,)`, *optional*): Whether or not the evidence block has answer(s). Returns: ''' pass def compute_correct_candidates(candidate_starts, candidate_ends, gold_starts, gold_ends): '''Compute correct span.''' pass def marginal_log_loss(logits, is_correct): '''Loss based on the negative marginal log-likelihood.''' pass def mask_to_score(mask, dtype=torch.float32): pass
9
3
37
4
25
8
2
0.29
1
7
4
0
2
4
2
133
165
20
112
48
88
33
57
31
51
8
3
1
12
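RealmReader's training objective marginalizes over every candidate span that matches a gold answer instead of supervising a single index. Below is a minimal, self-contained replay of that marginal log-likelihood on toy tensors; the logits and correctness flags are made-up values for illustration only.

```python
import torch

def mask_to_score(mask, dtype=torch.float32):
    # False -> huge negative score, True -> 0.0, so masked entries vanish inside logsumexp.
    return (1.0 - mask.type(dtype)) * torch.finfo(dtype).min

def marginal_log_loss(logits, is_correct):
    # -log( sum over correct candidates of softmax(logits) )
    log_numerator = torch.logsumexp(logits + mask_to_score(is_correct, dtype=logits.dtype), dim=-1)
    log_denominator = torch.logsumexp(logits, dim=-1)
    return log_denominator - log_numerator

logits = torch.tensor([2.0, 0.5, -1.0])         # scores for 3 candidate spans
is_correct = torch.tensor([True, False, True])  # two of them match a gold answer
print(marginal_log_loss(logits, is_correct))    # small positive scalar
```

The loss approaches zero as the softmax mass concentrates on the correct candidates, which is why the record above multiplies it by `any_reader_correct`: with no correct candidate the numerator would be empty.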
1,935
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmReaderOutput
import torch from typing import Optional, Union from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput from dataclasses import dataclass @dataclass class RealmReaderOutput(ModelOutput): """ Outputs of [`RealmReader`] models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Total loss. retriever_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Retriever loss. reader_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Reader loss. retriever_correct (`torch.BoolTensor` of shape `(config.searcher_beam_size,)`, *optional*): Whether or not an evidence block contains answer. reader_correct (`torch.BoolTensor` of shape `(config.reader_beam_size, num_candidates)`, *optional*): Whether or not a span candidate contains answer. block_idx (`torch.LongTensor` of shape `()`): The index of the retrieved evidence block in which the predicted answer is most likely. candidate (`torch.LongTensor` of shape `()`): The index of the retrieved span candidates in which the predicted answer is most likely. start_pos (`torch.IntTensor` of shape `()`): Predicted answer starting position in *RealmReader*'s inputs. end_pos (`torch.IntTensor` of shape `()`): Predicted answer ending position in *RealmReader*'s inputs. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None retriever_loss: Optional[torch.FloatTensor] = None reader_loss: Optional[torch.FloatTensor] = None retriever_correct: Optional[torch.BoolTensor] = None reader_correct: Optional[torch.BoolTensor] = None block_idx: Optional[torch.LongTensor] = None candidate: Optional[torch.LongTensor] = None start_pos: Optional[torch.IntTensor] = None end_pos: Optional[torch.IntTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass class RealmReaderOutput(ModelOutput): ''' Outputs of [`RealmReader`] models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Total loss. retriever_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Retriever loss. reader_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `start_positions`, `end_positions`, `has_answers` are provided): Reader loss. retriever_correct (`torch.BoolTensor` of shape `(config.searcher_beam_size,)`, *optional*): Whether or not an evidence block contains answer. reader_correct (`torch.BoolTensor` of shape `(config.reader_beam_size, num_candidates)`, *optional*): Whether or not a span candidate contains answer. block_idx (`torch.LongTensor` of shape `()`): The index of the retrieved evidence block in which the predicted answer is most likely. candidate (`torch.LongTensor` of shape `()`): The index of the retrieved span candidates in which the predicted answer is most likely. start_pos (`torch.IntTensor` of shape `()`): Predicted answer starting position in *RealmReader*'s inputs. end_pos (`torch.IntTensor` of shape `()`): Predicted answer ending position in *RealmReader*'s inputs. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
2
1
0
0
0
0
0
2.58
1
0
0
0
0
0
0
0
47
4
12
12
11
31
12
12
11
0
1
0
0
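As a `ModelOutput` dataclass, `RealmReaderOutput` supports attribute, key, and positional access, and drops `None` fields when converted to a tuple. A toy sketch of that behavior (the `ToyReaderOutput` class and its values are illustrative stand-ins, not REALM code):

```python
from dataclasses import dataclass
from typing import Optional

import torch
from transformers.utils import ModelOutput

@dataclass
class ToyReaderOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    block_idx: Optional[torch.LongTensor] = None

out = ToyReaderOutput(loss=torch.tensor(0.7), block_idx=torch.tensor(2))
print(out.loss, out["block_idx"], out[0])                     # attribute, key, positional
print(ToyReaderOutput(block_idx=torch.tensor(2)).to_tuple())  # None fields are skipped
```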
1,936
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmReaderProjection
import torch from torch import nn class RealmReaderProjection(nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense_intermediate = nn.Linear(config.hidden_size, config.span_hidden_size * 2) self.dense_output = nn.Linear(config.span_hidden_size, 1) self.layer_normalization = nn.LayerNorm(config.span_hidden_size, eps=config.reader_layer_norm_eps) self.relu = nn.ReLU() def forward(self, hidden_states, block_mask): def span_candidates(masks): """ Generate span candidates. Args: masks: <bool> [num_retrievals, max_sequence_len] Returns: starts: <int32> [num_spans] ends: <int32> [num_spans] span_masks: <int32> [num_retrievals, num_spans] whether spans locate in evidence block. """ _, max_sequence_len = masks.shape def _spans_given_width(width): current_starts = torch.arange(max_sequence_len - width + 1, device=masks.device) current_ends = torch.arange(width - 1, max_sequence_len, device=masks.device) return (current_starts, current_ends) starts, ends = zip(*(_spans_given_width(w + 1) for w in range(self.config.max_span_width))) starts = torch.cat(starts, 0) ends = torch.cat(ends, 0) start_masks = torch.index_select(masks, dim=-1, index=starts) end_masks = torch.index_select(masks, dim=-1, index=ends) span_masks = start_masks * end_masks return (starts, ends, span_masks) def mask_to_score(mask, dtype=torch.float32): return (1.0 - mask.type(dtype)) * torch.finfo(dtype).min hidden_states = self.dense_intermediate(hidden_states) start_projection, end_projection = hidden_states.chunk(2, dim=-1) candidate_starts, candidate_ends, candidate_mask = span_candidates(block_mask) candidate_start_projections = torch.index_select(start_projection, dim=1, index=candidate_starts) candidate_end_projections = torch.index_select(end_projection, dim=1, index=candidate_ends) candidate_hidden = candidate_start_projections + candidate_end_projections candidate_hidden = self.relu(candidate_hidden) candidate_hidden = self.layer_normalization(candidate_hidden) reader_logits = self.dense_output(candidate_hidden).squeeze(-1) reader_logits += mask_to_score(candidate_mask, dtype=reader_logits.dtype) return (reader_logits, candidate_starts, candidate_ends)
class RealmReaderProjection(nn.Module): def __init__(self, config): pass def forward(self, hidden_states, block_mask): pass def span_candidates(masks): ''' Generate span candidates. Args: masks: <bool> [num_retrievals, max_sequence_len] Returns: starts: <int32> [num_spans] ends: <int32> [num_spans] span_masks: <int32> [num_retrievals, num_spans] whether spans locate in evidence block. ''' pass def _spans_given_width(width): pass def mask_to_score(mask, dtype=torch.float32): pass
6
1
20
4
11
5
1
0.46
1
3
0
0
2
5
2
12
65
14
35
24
29
16
35
24
29
1
1
0
5
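`span_candidates` enumerates every (start, end) pair of width 1..`max_span_width` and keeps only spans whose endpoints both lie inside the evidence block. A standalone replay with toy sizes (the mask and `max_span_width` are made up):

```python
import torch

max_span_width = 2
masks = torch.tensor([[0, 1, 1, 1, 0]], dtype=torch.bool)  # 1 retrieval, seq_len = 5

def spans_given_width(width, max_sequence_len):
    starts = torch.arange(max_sequence_len - width + 1)
    ends = torch.arange(width - 1, max_sequence_len)
    return starts, ends

_, max_sequence_len = masks.shape
starts, ends = zip(*(spans_given_width(w + 1, max_sequence_len) for w in range(max_span_width)))
starts, ends = torch.cat(starts, 0), torch.cat(ends, 0)

# A span is valid iff both its start and end tokens fall on mask positions.
span_masks = masks[:, starts] * masks[:, ends]
print(list(zip(starts.tolist(), ends.tolist())))  # 5 width-1 spans, then 4 width-2 spans
print(span_masks.int())                           # tensor([[0, 1, 1, 1, 0, 0, 1, 1, 0]])
```

In the record, `mask_to_score(candidate_mask)` then pushes invalid spans to a huge negative logit so they never win the argmax.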
1,937
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmScorer
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings import torch from typing import Optional, Union @add_start_docstrings('The scorer of REALM outputting relevance scores representing the score of document candidates (before softmax).', REALM_START_DOCSTRING) class RealmScorer(RealmPreTrainedModel): """ Args: query_embedder ([`RealmEmbedder`]): Embedder for input sequences. If not specified, it will use the same embedder as candidate sequences. """ def __init__(self, config, query_embedder=None): super().__init__(config) self.embedder = RealmEmbedder(self.config) self.query_embedder = query_embedder if query_embedder is not None else self.embedder self.post_init() @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=RealmScorerOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, candidate_input_ids: Optional[torch.LongTensor]=None, candidate_attention_mask: Optional[torch.FloatTensor]=None, candidate_token_type_ids: Optional[torch.LongTensor]=None, candidate_inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, RealmScorerOutput]: """ candidate_input_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`): Indices of candidate input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) candidate_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) candidate_token_type_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) candidate_inputs_embeds (`torch.FloatTensor` of shape `(batch_size * num_candidates, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `candidate_input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *candidate_input_ids* indices into associated vectors than the model's internal embedding lookup matrix. 
Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, RealmScorer >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-scorer") >>> model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer", num_candidates=2) >>> # batch_size = 2, num_candidates = 2 >>> input_texts = ["How are you?", "What is the item in the picture?"] >>> candidates_texts = [["Hello world!", "Nice to meet you!"], ["A cute cat.", "An adorable dog."]] >>> inputs = tokenizer(input_texts, return_tensors="pt") >>> candidates_inputs = tokenizer.batch_encode_candidates(candidates_texts, max_length=10, return_tensors="pt") >>> outputs = model( ... **inputs, ... candidate_input_ids=candidates_inputs.input_ids, ... candidate_attention_mask=candidates_inputs.attention_mask, ... candidate_token_type_ids=candidates_inputs.token_type_ids, ... ) >>> relevance_score = outputs.relevance_score ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError('You have to specify either input_ids or input_embeds.') if candidate_input_ids is None and candidate_inputs_embeds is None: raise ValueError('You have to specify either candidate_input_ids or candidate_inputs_embeds.') query_outputs = self.query_embedder(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) flattened_input_ids, flattened_attention_mask, flattened_token_type_ids = self._flatten_inputs(candidate_input_ids, candidate_attention_mask, candidate_token_type_ids) candidate_outputs = self.embedder(flattened_input_ids, attention_mask=flattened_attention_mask, token_type_ids=flattened_token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=candidate_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) query_score = query_outputs[0] candidate_score = candidate_outputs[0] candidate_score = candidate_score.view(-1, self.config.num_candidates, self.config.retriever_proj_size) relevance_score = torch.einsum('bd,bnd->bn', query_score, candidate_score) if not return_dict: return (relevance_score, query_score, candidate_score) return RealmScorerOutput(relevance_score=relevance_score, query_score=query_score, candidate_score=candidate_score)
@add_start_docstrings('The scorer of REALM outputting relevance scores representing the score of document candidates (before softmax).', REALM_START_DOCSTRING) class RealmScorer(RealmPreTrainedModel): ''' Args: query_embedder ([`RealmEmbedder`]): Embedder for input sequences. If not specified, it will use the same embedder as candidate sequences. ''' def __init__(self, config, query_embedder=None): pass @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=RealmScorerOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, candidate_input_ids: Optional[torch.LongTensor]=None, candidate_attention_mask: Optional[torch.FloatTensor]=None, candidate_token_type_ids: Optional[torch.LongTensor]=None, candidate_inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, RealmScorerOutput]: ''' candidate_input_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`): Indices of candidate input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) candidate_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) candidate_token_type_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) candidate_inputs_embeds (`torch.FloatTensor` of shape `(batch_size * num_candidates, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `candidate_input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *candidate_input_ids* indices into associated vectors than the model's internal embedding lookup matrix. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, RealmScorer >>> tokenizer = AutoTokenizer.from_pretrained("google/realm-cc-news-pretrained-scorer") >>> model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer", num_candidates=2) >>> # batch_size = 2, num_candidates = 2 >>> input_texts = ["How are you?", "What is the item in the picture?"] >>> candidates_texts = [["Hello world!", "Nice to meet you!"], ["A cute cat.", "An adorable dog."]] >>> inputs = tokenizer(input_texts, return_tensors="pt") >>> candidates_inputs = tokenizer.batch_encode_candidates(candidates_texts, max_length=10, return_tensors="pt") >>> outputs = model( ... **inputs, ... candidate_input_ids=candidates_inputs.input_ids, ... 
candidate_attention_mask=candidates_inputs.attention_mask, ... candidate_token_type_ids=candidates_inputs.token_type_ids, ... ) >>> relevance_score = outputs.relevance_score ```''' pass
6
2
66
13
30
23
4
0.81
1
5
2
0
2
2
2
133
141
27
63
27
43
51
22
11
19
5
3
1
7
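The scorer's final step is just a batched dot product between one query embedding and `num_candidates` candidate embeddings. A shape-checked sketch of the `einsum('bd,bnd->bn')` with toy sizes:

```python
import torch

batch_size, num_candidates, proj_size = 2, 3, 4
query_score = torch.randn(batch_size, proj_size)                      # (b, d)
candidate_score = torch.randn(batch_size, num_candidates, proj_size)  # (b, n, d)

relevance_score = torch.einsum("bd,bnd->bn", query_score, candidate_score)
# Same thing spelled out: broadcast the query over candidates, sum over d.
assert torch.allclose(relevance_score, (query_score[:, None, :] * candidate_score).sum(-1), atol=1e-6)
print(relevance_score.shape)  # torch.Size([2, 3])
```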
1,938
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmScorerOutput
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput from typing import Optional, Union import torch from dataclasses import dataclass @dataclass class RealmScorerOutput(ModelOutput): """ Outputs of [`RealmScorer`] models. Args: relevance_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates)`): The relevance score of document candidates (before softmax). query_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`): Query score derived from the query embedder. candidate_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates, config.retriever_proj_size)`): Candidate score derived from the embedder. """ relevance_score: Optional[torch.FloatTensor] = None query_score: Optional[torch.FloatTensor] = None candidate_score: Optional[torch.FloatTensor] = None
@dataclass class RealmScorerOutput(ModelOutput): ''' Outputs of [`RealmScorer`] models. Args: relevance_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates)`): The relevance score of document candidates (before softmax). query_score (`torch.FloatTensor` of shape `(batch_size, config.retriever_proj_size)`): Query score derived from the query embedder. candidate_score (`torch.FloatTensor` of shape `(batch_size, config.num_candidates, config.retriever_proj_size)`): Candidate score derived from the embedder. ''' pass
2
1
0
0
0
0
0
2.5
1
0
0
0
0
0
0
0
16
2
4
4
3
10
4
4
3
0
1
0
0
1,939
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmScorerProjection
from torch import nn class RealmScorerProjection(nn.Module): def __init__(self, config): super().__init__() self.predictions = RealmLMPredictionHead(config) self.dense = nn.Linear(config.hidden_size, config.retriever_proj_size) self.LayerNorm = nn.LayerNorm(config.retriever_proj_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class RealmScorerProjection(nn.Module): def __init__(self, config): pass def forward(self, hidden_states): pass
3
0
5
0
5
0
1
0
1
2
1
0
2
3
2
12
11
1
10
6
7
0
10
6
7
1
1
0
2
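The projection head is a single `Linear` into the retrieval space followed by `LayerNorm`. A tiny sketch with made-up sizes and eps (the record reads both from the config):

```python
import torch
from torch import nn

hidden_size, retriever_proj_size = 8, 4
dense = nn.Linear(hidden_size, retriever_proj_size)
layer_norm = nn.LayerNorm(retriever_proj_size, eps=1e-12)

cls_hidden = torch.randn(2, hidden_size)   # e.g. one [CLS] hidden state per sequence
projected = layer_norm(dense(cls_hidden))  # (2, retriever_proj_size)
print(projected.shape)
```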
1,940
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmSelfAttention
from torch import nn import torch from ....cache_utils import Cache from typing import Optional, Union from ....utils.deprecation import deprecate_kwarg import math class RealmSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_values is not None: key_layer = past_key_values[0] value_layer = past_key_values[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_values is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_values[0], key_layer], dim=2) value_layer = torch.cat([past_key_values[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_values is not None if self.is_decoder: past_key_values = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': query_length, key_length = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, 
device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_values,) return outputs
class RealmSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): pass def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass
5
0
43
7
31
6
6
0.19
1
5
0
0
3
11
3
13
132
22
93
44
80
18
72
35
68
13
1
2
17
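Stripped of masking, caching, and the relative-position terms, the attention above is the standard multi-head pattern: split heads, scaled dot product, softmax, weighted sum, merge heads. A compact replay with toy sizes:

```python
import math
import torch

batch, seq_len, num_heads, head_size = 2, 5, 4, 8
hidden = num_heads * head_size

def transpose_for_scores(x):
    # (batch, seq, hidden) -> (batch, heads, seq, head_size)
    return x.view(batch, seq_len, num_heads, head_size).permute(0, 2, 1, 3)

q = transpose_for_scores(torch.randn(batch, seq_len, hidden))
k = transpose_for_scores(torch.randn(batch, seq_len, hidden))
v = transpose_for_scores(torch.randn(batch, seq_len, hidden))

scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_size)   # (b, h, s, s)
probs = scores.softmax(dim=-1)
context = torch.matmul(probs, v)                                       # (b, h, s, head_size)
context = context.permute(0, 2, 1, 3).reshape(batch, seq_len, hidden)  # merge heads back
print(context.shape)  # torch.Size([2, 5, 32])
```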
1,941
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/modeling_realm.py
transformers.models.deprecated.realm.modeling_realm.RealmSelfOutput
from torch import nn import torch class RealmSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class RealmSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
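This is the classic post-attention residual block; the same three steps in plain PyTorch, with made-up sizes and dropout rate:

```python
import torch
from torch import nn

hidden_size = 8
dense = nn.Linear(hidden_size, hidden_size)
norm = nn.LayerNorm(hidden_size, eps=1e-12)
drop = nn.Dropout(0.1)

attn_out = torch.randn(2, 5, hidden_size)  # output of self-attention
residual = torch.randn(2, 5, hidden_size)  # the layer's input tensor
out = norm(drop(dense(attn_out)) + residual)  # project, drop, add residual, normalize
print(out.shape)
```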
1,942
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/retrieval_realm.py
transformers.models.deprecated.realm.retrieval_realm.RealmRetriever
import numpy as np from ....utils import logging, strtobool from huggingface_hub import hf_hub_download from transformers import AutoTokenizer import os from typing import Optional, Union class RealmRetriever: """The retriever of REALM outputting the retrieved evidence block and whether the block has answers as well as answer positions. Parameters: block_records (`np.ndarray`): A numpy array which contains evidence texts. tokenizer ([`RealmTokenizer`]): The tokenizer to encode retrieved texts. """ def __init__(self, block_records, tokenizer): super().__init__() self.block_records = block_records self.tokenizer = tokenizer def __call__(self, retrieved_block_ids, question_input_ids, answer_ids, max_length=None, return_tensors='pt'): retrieved_blocks = np.take(self.block_records, indices=retrieved_block_ids, axis=0) question = self.tokenizer.decode(question_input_ids[0], skip_special_tokens=True) text = [] text_pair = [] for retrieved_block in retrieved_blocks: text.append(question) text_pair.append(retrieved_block.decode()) concat_inputs = self.tokenizer(text, text_pair, padding=True, truncation=True, return_special_tokens_mask=True, max_length=max_length) concat_inputs_tensors = concat_inputs.convert_to_tensors(return_tensors) if answer_ids is not None: return self.block_has_answer(concat_inputs, answer_ids) + (concat_inputs_tensors,) else: return (None, None, None, concat_inputs_tensors) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *init_inputs, **kwargs): if os.path.isdir(pretrained_model_name_or_path): block_records_path = os.path.join(pretrained_model_name_or_path, _REALM_BLOCK_RECORDS_FILENAME) else: block_records_path = hf_hub_download(repo_id=pretrained_model_name_or_path, filename=_REALM_BLOCK_RECORDS_FILENAME, **kwargs) if not strtobool(os.environ.get('TRUST_REMOTE_CODE', 'False')): raise ValueError("This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially malicious. It's recommended to never unpickle data that could have come from an untrusted source, or that could have been tampered with. 
If you already verified the pickle data and decided to use it, you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it.") block_records = np.load(block_records_path, allow_pickle=True) tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls(block_records, tokenizer) def save_pretrained(self, save_directory): np.save(os.path.join(save_directory, _REALM_BLOCK_RECORDS_FILENAME), self.block_records) self.tokenizer.save_pretrained(save_directory) def block_has_answer(self, concat_inputs, answer_ids): """Check whether the retrieved blocks contain answers.""" has_answers = [] start_pos = [] end_pos = [] max_answers = 0 for input_id in concat_inputs.input_ids: input_id_list = input_id.tolist() first_sep_idx = input_id_list.index(self.tokenizer.sep_token_id) second_sep_idx = first_sep_idx + 1 + input_id_list[first_sep_idx + 1:].index(self.tokenizer.sep_token_id) start_pos.append([]) end_pos.append([]) for answer in answer_ids: for idx in range(first_sep_idx + 1, second_sep_idx): if answer[0] == input_id_list[idx]: if input_id_list[idx:idx + len(answer)] == answer: start_pos[-1].append(idx) end_pos[-1].append(idx + len(answer) - 1) if len(start_pos[-1]) == 0: has_answers.append(False) else: has_answers.append(True) if len(start_pos[-1]) > max_answers: max_answers = len(start_pos[-1]) for start_pos_, end_pos_ in zip(start_pos, end_pos): if len(start_pos_) < max_answers: padded = [-1] * (max_answers - len(start_pos_)) start_pos_ += padded end_pos_ += padded return (has_answers, start_pos, end_pos)
class RealmRetriever: '''The retriever of REALM outputting the retrieved evidence block and whether the block has answers as well as answer positions. Parameters: block_records (`np.ndarray`): A numpy array which contains evidence texts. tokenizer ([`RealmTokenizer`]): The tokenizer to encode retrieved texts. ''' def __init__(self, block_records, tokenizer): pass def __call__(self, retrieved_block_ids, question_input_ids, answer_ids, max_length=None, return_tensors='pt'): pass @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *init_inputs, **kwargs): pass def save_pretrained(self, save_directory): pass def block_has_answer(self, concat_inputs, answer_ids): '''Check whether the retrieved blocks contain answers.''' pass
7
2
15
2
12
1
3
0.2
0
6
1
0
4
2
5
5
93
16
64
31
57
13
56
30
50
10
0
5
17
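The heart of `block_has_answer` is locating the evidence segment between the two `[SEP]` ids and scanning it for each answer id sequence. A pure-Python sketch for a single sequence, with made-up token ids (102 standing in for `[SEP]`) and without the original's `answer[0]` pre-check, which is only a fast path:

```python
def find_answer_spans(input_id_list, answer_ids, sep_token_id=102):
    # The evidence block sits between the first and second [SEP].
    first_sep = input_id_list.index(sep_token_id)
    second_sep = first_sep + 1 + input_id_list[first_sep + 1:].index(sep_token_id)
    starts, ends = [], []
    for answer in answer_ids:
        for idx in range(first_sep + 1, second_sep):
            if input_id_list[idx:idx + len(answer)] == answer:
                starts.append(idx)
                ends.append(idx + len(answer) - 1)
    return bool(starts), starts, ends

# [CLS] question [SEP]   evidence   [SEP]
ids = [101, 7, 8, 102, 5, 6, 9, 6, 9, 102]
print(find_answer_spans(ids, answer_ids=[[6, 9]]))  # (True, [5, 7], [6, 8])
```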
1,943
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/retrieval_realm.py
transformers.models.deprecated.realm.retrieval_realm.ScaNNSearcher
class ScaNNSearcher: """Note that ScaNNSearcher cannot currently be used within the model. It might, however, be included in future versions.""" def __init__(self, db, num_neighbors, dimensions_per_block=2, num_leaves=1000, num_leaves_to_search=100, training_sample_size=100000): """Build the ScaNN searcher.""" from scann.scann_ops.py.scann_ops_pybind import builder as Builder builder = Builder(db=db, num_neighbors=num_neighbors, distance_measure='dot_product') builder = builder.tree(num_leaves=num_leaves, num_leaves_to_search=num_leaves_to_search, training_sample_size=training_sample_size) builder = builder.score_ah(dimensions_per_block=dimensions_per_block) self.searcher = builder.build() def search_batched(self, question_projection): retrieved_block_ids, _ = self.searcher.search_batched(question_projection.detach().cpu()) return retrieved_block_ids.astype('int64')
class ScaNNSearcher: '''Note that ScaNNSearcher cannot currently be used within the model. It might, however, be included in future versions.''' def __init__(self, db, num_neighbors, dimensions_per_block=2, num_leaves=1000, num_leaves_to_search=100, training_sample_size=100000): '''Build the ScaNN searcher.''' pass def search_batched(self, question_projection): pass
3
2
12
2
10
1
1
0.1
0
0
0
0
2
1
2
2
27
5
20
14
8
2
10
6
6
1
0
0
2
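ScaNN approximates maximum-inner-product search; where it is unavailable, the same `search_batched` contract can be mimicked with a brute-force NumPy dot product. A stand-in sketch with toy shapes, not the ScaNN API:

```python
import numpy as np

rng = np.random.default_rng(0)
db = rng.normal(size=(1000, 128))    # evidence block embeddings
queries = rng.normal(size=(4, 128))  # projected question embeddings
num_neighbors = 8

scores = queries @ db.T  # (4, 1000) dot products, the 'dot_product' distance measure
retrieved_block_ids = np.argsort(-scores, axis=-1)[:, :num_neighbors].astype("int64")
print(retrieved_block_ids.shape)  # (4, 8), like search_batched's output
```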
1,944
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/tokenization_realm.py
transformers.models.deprecated.realm.tokenization_realm.BasicTokenizer
import unicodedata from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 
183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output)
class BasicTokenizer: ''' Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). ''' def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): pass def tokenize(self, text, never_split=None): ''' Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. ''' pass def _run_strip_accents(self, text): '''Strips accents from a piece of text.''' pass def _run_split_on_punc(self, text, never_split=None): '''Splits punctuation on a piece of text.''' pass def _tokenize_chinese_chars(self, text): '''Adds whitespace around any CJK character.''' pass def _is_chinese_char(self, cp): '''Checks whether CP is the codepoint of a CJK character.''' pass def _clean_text(self, text): '''Performs invalid character removal and whitespace cleanup on text.''' pass
8
7
17
1
13
5
4
0.57
0
2
0
0
7
4
7
7
147
14
89
30
81
51
76
30
68
8
0
4
27
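`_run_split_on_punc` is what turns `don't.` into `don / ' / t / .`. A dependency-free sketch of the same loop, using `str.isalnum()` as a rough stand-in for the library's `_is_punctuation` helper (the real tokenizer runs this on already whitespace-split tokens):

```python
def split_on_punc(text):
    output, start_new_word = [], True
    for char in text:
        if not char.isalnum():       # rough punctuation test; real code uses _is_punctuation
            output.append([char])    # punctuation becomes its own token
            start_new_word = True
        else:
            if start_new_word:
                output.append([])
            start_new_word = False
            output[-1].append(char)
    return ["".join(x) for x in output]

print(split_on_punc("don't."))  # ['don', "'", 't', '.']
```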
1,945
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/tokenization_realm.py
transformers.models.deprecated.realm.tokenization_realm.RealmTokenizer
from typing import Optional from ....tokenization_utils_base import BatchEncoding import os import collections from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ....utils import PaddingStrategy, logging class RealmTokenizer(PreTrainedTokenizer): """ Construct a REALM tokenizer. [`RealmTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained model use `tokenizer = RealmTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def batch_encode_candidates(self, text, **kwargs): """ Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following differences: 1. Handle additional num_candidate axis. (batch_size, num_candidates, text) 2. Always pad the sequences to *max_length*. 3. Must specify *max_length* in order to stack packs of candidates into a batch. - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: text (`List[List[str]]`): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). text_pair (`List[List[str]]`, *optional*): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs: Keyword arguments of the __call__ method. Returns: [`BatchEncoding`]: Encoded text or text pair. 
Example: ```python >>> from transformers import RealmTokenizer >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> tokenizer = RealmTokenizer.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") ```""" kwargs['padding'] = PaddingStrategy.MAX_LENGTH batch_text = text batch_text_pair = kwargs.pop('text_pair', None) return_tensors = kwargs.pop('return_tensors', None) output_data = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []} for idx, candidate_text in enumerate(batch_text): if batch_text_pair is not None: candidate_text_pair = batch_text_pair[idx] else: candidate_text_pair = None encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs) encoded_input_ids = encoded_candidates.get('input_ids') encoded_attention_mask = encoded_candidates.get('attention_mask') encoded_token_type_ids = encoded_candidates.get('token_type_ids') if encoded_input_ids is not None: output_data['input_ids'].append(encoded_input_ids) if encoded_attention_mask is not None: output_data['attention_mask'].append(encoded_attention_mask) if encoded_token_type_ids is not None: output_data['token_type_ids'].append(encoded_token_type_ids) output_data = {key: item for key, item in output_data.items() if len(item) != 0} return BatchEncoding(output_data, tensor_type=return_tensors) def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,)
class RealmTokenizer(PreTrainedTokenizer): ''' Construct a REALM tokenizer. [`RealmTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). ''' def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): pass @property def do_lower_case(self): pass @property def vocab_size(self): pass def get_vocab(self): pass def _tokenize(self, text): pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def batch_encode_candidates(self, text, **kwargs): ''' Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following differences: 1. Handle additional num_candidate axis. (batch_size, num_candidates, text) 2. Always pad the sequences to *max_length*. 3. Must specify *max_length* in order to stack packs of candidates into a batch. 
- single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: text (`List[List[str]]`): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). text_pair (`List[List[str]]`, *optional*): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs: Keyword arguments of the __call__ method. Returns: [`BatchEncoding`]: Encoded text or text pair. Example: ```python >>> from transformers import RealmTokenizer >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> tokenizer = RealmTokenizer.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") ```''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
total_program_units: 15
total_doc_str: 7
AvgCountLine: 19
AvgCountLineBlank: 2
AvgCountLineCode: 11
AvgCountLineComment: 6
AvgCyclomatic: 2
CommentToCodeRatio: 0.79
CountClassBase: 1
CountClassCoupled: 11
CountClassCoupledModified: 3
CountClassDerived: 0
CountDeclInstanceMethod: 13
CountDeclInstanceVariable: 5
CountDeclMethod: 13
CountDeclMethodAll: 102
CountLine: 303
CountLineBlank: 46
CountLineCode: 144
CountLineCodeDecl: 63
CountLineCodeExe: 108
CountLineComment: 114
CountStmt: 87
CountStmtDecl: 40
CountStmtExe: 73
MaxCyclomatic: 6
MaxInheritanceTree: 3
MaxNesting: 3
SumCyclomatic: 32

id: 1,946
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/realm/tokenization_realm_fast.py
class_name: transformers.models.deprecated.realm.tokenization_realm_fast.RealmTokenizerFast
import json from ....tokenization_utils_fast import PreTrainedTokenizerFast from typing import Optional from ....tokenization_utils_base import BatchEncoding from ....utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer from tokenizers import normalizers class RealmTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. [`RealmTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. 
""" vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = RealmTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def batch_encode_candidates(self, text, **kwargs): """ Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following differences: 1. Handle additional num_candidate axis. (batch_size, num_candidates, text) 2. Always pad the sequences to *max_length*. 3. Must specify *max_length* in order to stack packs of candidates into a batch. - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: text (`List[List[str]]`): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). text_pair (`List[List[str]]`, *optional*): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs: Keyword arguments of the __call__ method. Returns: [`BatchEncoding`]: Encoded text or text pair. 
Example: ```python >>> from transformers import RealmTokenizerFast >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") ```""" kwargs['padding'] = PaddingStrategy.MAX_LENGTH batch_text = text batch_text_pair = kwargs.pop('text_pair', None) return_tensors = kwargs.pop('return_tensors', None) output_data = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []} for idx, candidate_text in enumerate(batch_text): if batch_text_pair is not None: candidate_text_pair = batch_text_pair[idx] else: candidate_text_pair = None encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs) encoded_input_ids = encoded_candidates.get('input_ids') encoded_attention_mask = encoded_candidates.get('attention_mask') encoded_token_type_ids = encoded_candidates.get('token_type_ids') if encoded_input_ids is not None: output_data['input_ids'].append(encoded_input_ids) if encoded_attention_mask is not None: output_data['attention_mask'].append(encoded_attention_mask) if encoded_token_type_ids is not None: output_data['token_type_ids'].append(encoded_token_type_ids) output_data = {key: item for key, item in output_data.items() if len(item) != 0} return BatchEncoding(output_data, tensor_type=return_tensors) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
class RealmTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. [`RealmTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. ''' def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): pass def batch_encode_candidates(self, text, **kwargs): ''' Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following differences: 1. Handle additional num_candidate axis. (batch_size, num_candidates, text) 2. Always pad the sequences to *max_length*. 3. Must specify *max_length* in order to stack packs of candidates into a batch. - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: text (`List[List[str]]`): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). text_pair (`List[List[str]]`, *optional*): The batch of sequences to be encoded. Each sequence must be in this format: (batch_size, num_candidates, text). **kwargs: Keyword arguments of the __call__ method. 
Returns: [`BatchEncoding`]: Encoded text or text pair. Example: ```python >>> from transformers import RealmTokenizerFast >>> # batch_size = 2, num_candidates = 2 >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]] >>> tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder") >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt") ```''' pass def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A REALM sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
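The shape handling in `batch_encode_candidates` is the non-obvious part: each row of candidates is encoded separately and the per-row results are stacked, which only yields a rectangular batch because every sequence is padded to `max_length`. A toy sketch of that stacking, with a hypothetical `pad_encode` standing in for the real tokenizer call:

```python
# pad_encode is a stand-in for the real tokenizer; 101/102 mimic [CLS]/[SEP],
# 999 is a placeholder id per word. Purely illustrative.
def pad_encode(sentence, max_length=10):
    ids = [101] + [999] * len(sentence.split()) + [102]
    return (ids + [0] * max_length)[:max_length]

text = [["Hello world!", "Nice to meet you!"],
        ["The cute cat.", "The adorable dog."]]

input_ids = [[pad_encode(candidate) for candidate in row] for row in text]
# (len(input_ids), len(input_ids[0]), len(input_ids[0][0]))
# -> (batch_size=2, num_candidates=2, max_length=10)
```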
total_program_units: 5
total_doc_str: 3
AvgCountLine: 33
AvgCountLineBlank: 6
AvgCountLineCode: 16
AvgCountLineComment: 11
AvgCyclomatic: 3
CommentToCodeRatio: 1.12
CountClassBase: 1
CountClassCoupled: 7
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 5
CountDeclInstanceVariable: 1
CountDeclMethod: 5
CountDeclMethodAll: 93
CountLine: 217
CountLineBlank: 37
CountLineCode: 85
CountLineCodeDecl: 40
CountLineCodeExe: 64
CountLineComment: 95
CountStmt: 49
CountStmtDecl: 25
CountStmtExe: 43
MaxCyclomatic: 6
MaxInheritanceTree: 3
MaxNesting: 2
SumCyclomatic: 13

id: 1,947
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/retribert/configuration_retribert.py
class_name: transformers.models.deprecated.retribert.configuration_retribert.RetriBertConfig
from ....configuration_utils import PretrainedConfig class RetriBertConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RetriBERT [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RetriBertModel`] hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the *token_type_ids* passed into [`BertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
share_encoders (`bool`, *optional*, defaults to `True`): Whether or not to use the same Bert-type encoder for the queries and document projection_dim (`int`, *optional*, defaults to 128): Final dimension of the query and document representation after projection """ model_type = 'retribert' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.share_encoders = share_encoders self.projection_dim = projection_dim
class RetriBertConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RetriBERT [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RetriBertModel`] hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the *token_type_ids* passed into [`BertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. share_encoders (`bool`, *optional*, defaults to `True`): Whether or not to use the same Bert-type encoder for the queries and document projection_dim (`int`, *optional*, defaults to 128): Final dimension of the query and document representation after projection ''' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs): pass
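Note that the docstring above advertises `num_hidden_layers` defaulting to 12 while the `__init__` signature actually uses 8; the signature wins. A hedged usage sketch (RetriBERT sits under transformers' deprecated models, so the import assumes a release that still ships it):

```python
from transformers import RetriBertConfig  # assumes a transformers version that still exposes it

config = RetriBertConfig()
print(config.num_hidden_layers)  # 8, despite the docstring's "defaults to 12"
print(config.projection_dim)     # 128

# Overriding fields works as with any PretrainedConfig subclass.
small = RetriBertConfig(hidden_size=384, num_hidden_layers=4, projection_dim=64)
```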
total_program_units: 2
total_doc_str: 1
AvgCountLine: 35
AvgCountLineBlank: 1
AvgCountLineCode: 34
AvgCountLineComment: 0
AvgCyclomatic: 1
CommentToCodeRatio: 1.11
CountClassBase: 1
CountClassCoupled: 1
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 1
CountDeclInstanceVariable: 14
CountDeclMethod: 1
CountDeclMethodAll: 33
CountLine: 82
CountLineBlank: 6
CountLineCode: 36
CountLineCodeDecl: 35
CountLineCodeExe: 16
CountLineComment: 40
CountStmt: 18
CountStmtDecl: 17
CountStmtExe: 16
MaxCyclomatic: 1
MaxInheritanceTree: 2
MaxNesting: 0
SumCyclomatic: 1

id: 1,948
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/retribert/modeling_retribert.py
class_name: transformers.models.deprecated.retribert.modeling_retribert.RetriBertModel
from typing import Optional from ...bert.modeling_bert import BertModel from .configuration_retribert import RetriBertConfig from torch import nn import torch.utils.checkpoint as checkpoint import math import torch from ....utils import add_start_docstrings, logging @add_start_docstrings('Bert Based model to embed queries or document for document retrieval.', RETRIBERT_START_DOCSTRING) class RetriBertModel(RetriBertPreTrainedModel): def __init__(self, config: RetriBertConfig) -> None: super().__init__(config) self.projection_dim = config.projection_dim self.bert_query = BertModel(config) self.bert_doc = None if config.share_encoders else BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.ce_loss = nn.CrossEntropyLoss(reduction='mean') self.post_init() def embed_sentences_checkpointed(self, input_ids, attention_mask, sent_encoder, checkpoint_batch_size=-1): if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: return sent_encoder(input_ids, attention_mask=attention_mask)[1] else: device = input_ids.device input_shape = input_ids.size() token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) head_mask = [None] * sent_encoder.config.num_hidden_layers extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(attention_mask, input_shape) def partial_encode(*inputs): encoder_outputs = sent_encoder.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask) sequence_output = encoder_outputs[0] pooled_output = sent_encoder.pooler(sequence_output) return pooled_output embedding_output = sent_encoder.embeddings(input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None) pooled_output_list = [] for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): b_embedding_output = embedding_output[b * checkpoint_batch_size:(b + 1) * checkpoint_batch_size] b_attention_mask = extended_attention_mask[b * checkpoint_batch_size:(b + 1) * checkpoint_batch_size] pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) pooled_output_list.append(pooled_output) return torch.cat(pooled_output_list, dim=0) def embed_questions(self, input_ids, attention_mask=None, checkpoint_batch_size=-1): q_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, self.bert_query, checkpoint_batch_size) return self.project_query(q_reps) def embed_answers(self, input_ids, attention_mask=None, checkpoint_batch_size=-1): a_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, self.bert_query if self.bert_doc is None else self.bert_doc, checkpoint_batch_size) return self.project_doc(a_reps) def forward(self, input_ids_query: torch.LongTensor, attention_mask_query: Optional[torch.FloatTensor], input_ids_doc: torch.LongTensor, attention_mask_doc: Optional[torch.FloatTensor], checkpoint_batch_size: int=-1) -> torch.FloatTensor: """ Args: input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary for the queries in a batch. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary for the documents in a batch. attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on documents padding token indices. checkpoint_batch_size (`int`, *optional*, defaults to `-1`): If greater than 0, uses gradient checkpointing to only compute sequence representation on `checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to all document representations in the batch. Return: `torch.FloatTensor``: The bidirectional cross-entropy loss obtained while trying to match each query to its corresponding document and each document to its corresponding query in the batch """ device = input_ids_query.device q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size) a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size) compare_scores = torch.mm(q_reps, a_reps.t()) loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device)) loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device)) loss = (loss_qa + loss_aq) / 2 return loss
@add_start_docstrings('Bert Based model to embed queries or document for document retrieval.', RETRIBERT_START_DOCSTRING) class RetriBertModel(RetriBertPreTrainedModel): def __init__(self, config: RetriBertConfig) -> None: pass def embed_sentences_checkpointed(self, input_ids, attention_mask, sent_encoder, checkpoint_batch_size=-1): pass def partial_encode(*inputs): pass def embed_questions(self, input_ids, attention_mask=None, checkpoint_batch_size=-1): pass def embed_answers(self, input_ids, attention_mask=None, checkpoint_batch_size=-1): pass def forward(self, input_ids_query: torch.LongTensor, attention_mask_query: Optional[torch.FloatTensor], input_ids_doc: torch.LongTensor, attention_mask_doc: Optional[torch.FloatTensor], checkpoint_batch_size: int=-1) -> torch.FloatTensor: ''' Args: input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary for the queries in a batch. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary for the documents in a batch. attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on documents padding token indices. checkpoint_batch_size (`int`, *optional*, defaults to `-1`): If greater than 0, uses gradient checkpointing to only compute sequence representation on `checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to all document representations in the batch. Return: `torch.FloatTensor``: The bidirectional cross-entropy loss obtained while trying to match each query to its corresponding document and each document to its corresponding query in the batch ''' pass
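The forward pass above boils down to an in-batch bidirectional cross-entropy: every query is scored against every document in the batch (and vice versa), with the diagonal as the target. A self-contained sketch of that loss on random projections, mirroring the `torch.mm` and `CrossEntropyLoss` calls in the source:

```python
import torch
from torch import nn

torch.manual_seed(0)
batch_size, projection_dim = 4, 128
q_reps = torch.randn(batch_size, projection_dim)  # stand-in for embed_questions output
a_reps = torch.randn(batch_size, projection_dim)  # stand-in for embed_answers output

scores = torch.mm(q_reps, a_reps.t())             # (batch, batch) similarity matrix
targets = torch.arange(batch_size)                # query i should match document i
ce = nn.CrossEntropyLoss(reduction="mean")
loss = (ce(scores, targets) + ce(scores.t(), targets)) / 2
```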
total_program_units: 8
total_doc_str: 1
AvgCountLine: 23
AvgCountLineBlank: 2
AvgCountLineCode: 16
AvgCountLineComment: 5
AvgCyclomatic: 2
CommentToCodeRatio: 0.34
CountClassBase: 1
CountClassCoupled: 6
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 5
CountDeclInstanceVariable: 7
CountDeclMethod: 5
CountDeclMethodAll: 135
CountLine: 133
CountLineBlank: 14
CountLineCode: 89
CountLineCodeDecl: 60
CountLineCodeExe: 59
CountLineComment: 30
CountStmt: 47
CountStmtDecl: 37
CountStmtExe: 40
MaxCyclomatic: 3
MaxInheritanceTree: 3
MaxNesting: 2
SumCyclomatic: 10

id: 1,949
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/retribert/modeling_retribert.py
class_name: transformers.models.deprecated.retribert.modeling_retribert.RetriBertPreTrainedModel
from ....modeling_utils import PreTrainedModel from .configuration_retribert import RetriBertConfig from torch import nn class RetriBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: RetriBertConfig base_model_prefix = 'retribert' def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class RetriBertPreTrainedModel(PreTrainedModel): ''' An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. ''' def _init_weights(self, module): '''Initialize the weights''' pass
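The `_init_weights` hook above is called once per submodule by the `PreTrainedModel` machinery; outside that machinery the same effect comes from `nn.Module.apply`. A sketch of the pattern, assuming the 0.02 `initializer_range` default from the config:

```python
from torch import nn

def init_weights(module, std=0.02):  # std mirrors config.initializer_range
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

toy = nn.Sequential(nn.Embedding(10, 8, padding_idx=0), nn.Linear(8, 8), nn.LayerNorm(8))
toy.apply(init_weights)  # walks every submodule, as post_init() does internally
```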
total_program_units: 2
total_doc_str: 2
AvgCountLine: 13
AvgCountLineBlank: 0
AvgCountLineCode: 12
AvgCountLineComment: 1
AvgCyclomatic: 6
CommentToCodeRatio: 0.31
CountClassBase: 1
CountClassCoupled: 0
CountClassCoupledModified: 0
CountClassDerived: 1
CountDeclInstanceMethod: 1
CountDeclInstanceVariable: 0
CountDeclMethod: 1
CountDeclMethodAll: 130
CountLine: 23
CountLineBlank: 2
CountLineCode: 16
CountLineCodeDecl: 5
CountLineCodeExe: 14
CountLineComment: 5
CountStmt: 14
CountStmtDecl: 5
CountStmtExe: 12
MaxCyclomatic: 6
MaxInheritanceTree: 2
MaxNesting: 2
SumCyclomatic: 6

id: 1,950
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/retribert/tokenization_retribert.py
class_name: transformers.models.deprecated.retribert.tokenization_retribert.BasicTokenizer
from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace import unicodedata class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. 
""" never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output)
class BasicTokenizer: ''' Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. ''' def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): pass def tokenize(self, text, never_split=None): ''' Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. ''' pass def _run_strip_accents(self, text): '''Strips accents from a piece of text.''' pass def _run_split_on_punc(self, text, never_split=None): '''Splits punctuation on a piece of text.''' pass def _tokenize_chinese_chars(self, text): '''Adds whitespace around any CJK character.''' pass def _is_chinese_char(self, cp): '''Checks whether CP is the codepoint of a CJK character.''' pass def _clean_text(self, text): '''Performs invalid character removal and whitespace cleanup on text.''' pass
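The long boolean chain in `_is_chinese_char` above is just a membership test over the CJK Unicode blocks; rewritten with hex range pairs it is easier to audit. The ranges below are the same decimal bounds from the source, converted to hex:

```python
# The exact codepoint ranges _is_chinese_char tests, as (low, high) pairs.
CJK_RANGES = [
    (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF), (0x2A700, 0x2B73F),
    (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF), (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
]

def is_cjk_codepoint(cp):
    return any(lo <= cp <= hi for lo, hi in CJK_RANGES)

assert is_cjk_codepoint(ord("中"))
assert not is_cjk_codepoint(ord("a"))
```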
total_program_units: 8
total_doc_str: 7
AvgCountLine: 19
AvgCountLineBlank: 1
AvgCountLineCode: 14
AvgCountLineComment: 5
AvgCyclomatic: 4
CommentToCodeRatio: 0.55
CountClassBase: 0
CountClassCoupled: 2
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 7
CountDeclInstanceVariable: 5
CountDeclMethod: 7
CountDeclMethodAll: 7
CountLine: 159
CountLineBlank: 14
CountLineCode: 98
CountLineCodeDecl: 39
CountLineCodeExe: 83
CountLineComment: 54
CountStmt: 78
CountStmtDecl: 32
CountStmtExe: 70
MaxCyclomatic: 8
MaxInheritanceTree: 0
MaxNesting: 4
SumCyclomatic: 27

id: 1,951
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/retribert/tokenization_retribert.py
class_name: transformers.models.deprecated.retribert.tokenization_retribert.RetriBertTokenizer
from typing import Optional import collections import os from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace class RetriBertTokenizer(PreTrainedTokenizer): """ Constructs a RetriBERT tokenizer. [`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to: this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. 
Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,)
class RetriBertTokenizer(PreTrainedTokenizer): ''' Constructs a RetriBERT tokenizer. [`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to: this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. do_basic_tokenize (`bool`, *optional*, defaults to `True`): Whether or not to do basic tokenization before WordPiece. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). ''' def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): pass @property def do_lower_case(self): pass @property def vocab_size(self): pass def get_vocab(self): pass def _tokenize(self, text, split_special_tokens=False): pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. 
A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
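The mask returned by `get_special_tokens_mask` marks exactly the `[CLS]`/`[SEP]` positions that `build_inputs_with_special_tokens` inserts. A pure-Python sketch of the pair case, following the list arithmetic in the source:

```python
token_ids_0 = [7, 8, 9]  # sequence A (illustrative ids)
token_ids_1 = [10, 11]   # sequence B

mask = [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
assert mask == [1, 0, 0, 0, 1, 0, 0, 1]  # [CLS] A [SEP] B [SEP]
```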
total_program_units: 14
total_doc_str: 6
AvgCountLine: 15
AvgCountLineBlank: 1
AvgCountLineCode: 10
AvgCountLineComment: 4
AvgCyclomatic: 2
CommentToCodeRatio: 0.71
CountClassBase: 1
CountClassCoupled: 9
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 12
CountDeclInstanceVariable: 5
CountDeclMethod: 12
CountDeclMethodAll: 101
CountLine: 233
CountLineBlank: 29
CountLineCode: 120
CountLineCodeDecl: 53
CountLineCodeExe: 85
CountLineComment: 85
CountStmt: 66
CountStmtDecl: 30
CountStmtExe: 53
MaxCyclomatic: 6
MaxInheritanceTree: 3
MaxNesting: 3
SumCyclomatic: 27

id: 1,952
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/retribert/tokenization_retribert_fast.py
class_name: transformers.models.deprecated.retribert.tokenization_retribert_fast.RetriBertTokenizerFast
from ....tokenization_utils_fast import PreTrainedTokenizerFast import json from typing import Optional from .tokenization_retribert import RetriBertTokenizer from tokenizers import normalizers class RetriBertTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library). [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. 
""" vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = RetriBertTokenizer model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
class RetriBertTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library). [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. clean_text (`bool`, *optional*, defaults to `True`): Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. ''' def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): pass def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
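The `__init__` above exists mostly to keep the serialized backend normalizer in sync with the constructor arguments. A sketch of the same round-trip using the `tokenizers` library directly; the three keyword names match the state keys the source inspects:

```python
import json
from tokenizers import normalizers

norm = normalizers.BertNormalizer(lowercase=True, strip_accents=None, handle_chinese_chars=True)
state = json.loads(norm.__getstate__())  # {"type": "BertNormalizer", "lowercase": true, ...}

do_lower_case = False  # requested setting differs from the serialized one
if state.get("lowercase", do_lower_case) != do_lower_case:
    normalizer_class = getattr(normalizers, state.pop("type"))  # -> BertNormalizer
    state["lowercase"] = do_lower_case
    norm = normalizer_class(**state)  # rebuilt with the corrected flag
```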
total_program_units: 4
total_doc_str: 2
AvgCountLine: 24
AvgCountLineBlank: 3
AvgCountLineCode: 14
AvgCountLineComment: 7
AvgCyclomatic: 2
CommentToCodeRatio: 1.14
CountClassBase: 1
CountClassCoupled: 4
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 4
CountDeclInstanceVariable: 1
CountDeclMethod: 4
CountDeclMethodAll: 92
CountLine: 145
CountLineBlank: 19
CountLineCode: 59
CountLineCodeDecl: 30
CountLineCodeExe: 39
CountLineComment: 67
CountStmt: 28
CountStmtDecl: 15
CountStmtExe: 23
MaxCyclomatic: 2
MaxInheritanceTree: 3
MaxNesting: 1
SumCyclomatic: 7

id: 1,953
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/configuration_speech_to_text_2.py
class_name: transformers.models.deprecated.speech_to_text_2.configuration_speech_to_text_2.Speech2Text2Config
from ....configuration_utils import PretrainedConfig class Speech2Text2Config(PretrainedConfig): """ This is the configuration class to store the configuration of a [`Speech2Text2ForCausalLM`]. It is used to instantiate an Speech2Text2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text2 [facebook/s2t-wav2vec2-large-en-de](https://huggingface.co/facebook/s2t-wav2vec2-large-en-de) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Speech2TextModel`] d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. https://huggingface.co/papers/1909.11556>`__ for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). max_target_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
Example: ```python >>> from transformers import Speech2Text2Config, Speech2Text2ForCausalLM >>> # Initializing a Speech2Text2 s2t_transformer_s style configuration >>> configuration = Speech2Text2Config() >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration >>> model = Speech2Text2ForCausalLM(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'speech_to_text_2' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs): self.vocab_size = vocab_size self.d_model = d_model self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = decoder_layers self.scale_embedding = scale_embedding self.max_target_positions = max_target_positions super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs)
class Speech2Text2Config(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`Speech2Text2ForCausalLM`]. It is used to instantiate a Speech2Text2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text2 [facebook/s2t-wav2vec2-large-en-de](https://huggingface.co/facebook/s2t-wav2vec2-large-en-de) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 10000): Vocabulary size of the Speech2Text2 model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`Speech2Text2ForCausalLM`]. d_model (`int`, *optional*, defaults to 256): Dimensionality of the layers and the pooler layer. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. decoder_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the decoder. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings and decoder. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). max_target_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). Example: ```python >>> from transformers import Speech2Text2Config, Speech2Text2ForCausalLM >>> # Initializing a Speech2Text2 s2t_transformer_s style configuration >>> configuration = Speech2Text2Config() >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration >>> model = Speech2Text2ForCausalLM(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs): pass
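The `attribute_map` declared on this config redirects the generic names `num_attention_heads` and `hidden_size` to the model-specific `decoder_attention_heads` and `d_model`. A toy stand-in showing the read-side aliasing idea; `AliasedConfig` is hypothetical, and the real redirection (including writes) lives in `PretrainedConfig`:

```python
# Toy sketch of attribute_map aliasing; not the transformers implementation.
class AliasedConfig:
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, d_model=256, decoder_attention_heads=4):
        self.d_model = d_model
        self.decoder_attention_heads = decoder_attention_heads

    def __getattr__(self, name):
        # Only called when normal lookup fails; redirect known aliases.
        alias = type(self).attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)

cfg = AliasedConfig()
print(cfg.hidden_size, cfg.num_attention_heads)  # 256 4
```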
2
1
45
1
44
1
1
1.04
1
1
0
0
1
15
1
33
108
11
48
41
25
50
21
20
19
1
2
0
1
1,954
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.modeling_speech_to_text_2.Speech2Text2Attention
from torch import nn from .configuration_speech_to_text_2 import Speech2Text2Config import torch from typing import Optional, Union from ....cache_utils import Cache from ....utils.deprecation import deprecate_kwarg class Speech2Text2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[Speech2Text2Config]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_values is not None and (past_key_values[0].shape[2] == key_value_states.shape[1]): key_states = past_key_values[0] value_states = past_key_values[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_values is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_values[0], key_states], dim=2) value_states = torch.cat([past_key_values[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_values = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask 
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_values)
class Speech2Text2Attention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[Speech2Text2Config]=None): pass def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
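A shape walk-through of the bmm-based attention in `forward` above, with the learned q/k/v projections left out and illustrative sizes; a sketch of the tensor plumbing, not the module itself:

```python
import torch
import torch.nn.functional as F

# Illustrative sizes (assumptions, not taken from a real checkpoint).
bsz, tgt_len, embed_dim, num_heads = 2, 5, 256, 4
head_dim = embed_dim // num_heads   # 64
scaling = head_dim ** -0.5          # matches self.scaling above

hidden = torch.randn(bsz, tgt_len, embed_dim)

def shape(t):  # mirrors Speech2Text2Attention._shape
    return t.view(bsz, -1, num_heads, head_dim).transpose(1, 2).contiguous()

proj = (bsz * num_heads, -1, head_dim)
q = shape(hidden * scaling).view(*proj)  # queries are pre-scaled, as in the module
k = shape(hidden).view(*proj)            # real module applies k_proj/v_proj first
v = shape(hidden).view(*proj)

attn = F.softmax(torch.bmm(q, k.transpose(1, 2)), dim=-1)   # (bsz*heads, tgt, src)
out = torch.bmm(attn, v)                                     # (bsz*heads, tgt, head_dim)
out = out.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2).reshape(bsz, tgt_len, embed_dim)
print(out.shape)  # torch.Size([2, 5, 256])
```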
5
2
50
7
35
8
5
0.24
1
7
1
0
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
1,955
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.modeling_speech_to_text_2.Speech2Text2Decoder
import torch import math from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from .configuration_speech_to_text_2 import Speech2Text2Config from torch import nn from ....modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask class Speech2Text2Decoder(Speech2Text2PreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2Text2DecoderLayer`] Args: config: Speech2Text2Config embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: Speech2Text2Config): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = Speech2Text2SinusoidalPositionalEmbedding(self.max_target_positions, config.d_model, self.padding_idx) self.layers = nn.ModuleList([Speech2Text2DecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`Speech2Text2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache = True` is incompatible with gradient checkpointing. 
Setting `use_cache = False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None next_decoder_cache = () if use_cache else None for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values[idx] if past_key_values is not None else None, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
class Speech2Text2Decoder(Speech2Text2PreTrainedModel): ''' Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2Text2DecoderLayer`] Args: config: Speech2Text2Config embed_tokens (nn.Embedding): output embedding ''' def __init__(self, config: Speech2Text2Config): pass def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`Speech2Text2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
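The layer loop in `Speech2Text2Decoder.forward` implements LayerDrop: during training, each decoder layer is skipped outright with probability `config.decoder_layerdrop`. A standalone sketch of just that control flow, with hypothetical `nn.Linear` layers standing in for decoder layers:

```python
import torch
from torch import nn

# LayerDrop sketch: each layer is dropped with probability `layerdrop` in training.
layerdrop = 0.5
layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(6)])

def run(hidden_states, training=True):
    for layer in layers:
        if training and torch.rand([]) < layerdrop:
            continue  # skip the whole layer; here the skip path is the identity
        hidden_states = layer(hidden_states)
    return hidden_states

print(run(torch.randn(2, 8)).shape)  # torch.Size([2, 8])
```

At evaluation time (`training=False`) every layer always runs, so inference is deterministic.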
3
2
58
8
34
16
10
0.5
1
10
4
0
4
9
4
134
244
38
137
42
118
69
73
28
68
37
3
3
41
1,956
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.modeling_speech_to_text_2.Speech2Text2DecoderLayer
from ....cache_utils import Cache from .configuration_speech_to_text_2 import Speech2Text2Config from torch import nn from typing import Optional, Union from ....activations import ACT2FN import torch from ....modeling_layers import GradientCheckpointingLayer from ....utils.deprecation import deprecate_kwarg class Speech2Text2DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Speech2Text2Config): super().__init__() self.embed_dim = config.d_model self.self_attn = Speech2Text2Attention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) if config.is_decoder: self.encoder_attn = Speech2Text2Attention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size *(decoder_attention_heads,)*. past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states self_attn_past_key_value = past_key_values[:2] if past_key_values is not None else None hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_values=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states cross_attn_past_key_value = past_key_values[-2:] if past_key_values is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs
class Speech2Text2DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Speech2Text2Config): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True): ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size *(decoder_attention_heads,)*. past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
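Each sub-block of this decoder layer follows the same post-norm residual pattern: sublayer, dropout, residual add, then LayerNorm. A compressed sketch of that pattern, with a `nn.Linear` standing in for the attention or feed-forward sublayer:

```python
import torch
from torch import nn

# Post-norm residual pattern used by Speech2Text2DecoderLayer:
# sublayer -> dropout -> residual add -> LayerNorm.
embed_dim, p_drop = 16, 0.1
sublayer = nn.Linear(embed_dim, embed_dim)   # stand-in for self-attn / cross-attn / FFN
layer_norm = nn.LayerNorm(embed_dim)

def residual_block(hidden_states, training=False):
    residual = hidden_states
    hidden_states = sublayer(hidden_states)
    hidden_states = nn.functional.dropout(hidden_states, p=p_drop, training=training)
    return layer_norm(residual + hidden_states)

print(residual_block(torch.randn(2, 4, embed_dim)).shape)  # torch.Size([2, 4, 16])
```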
4
1
58
7
39
13
4
0.32
1
5
2
0
2
11
2
12
118
14
79
32
65
25
45
21
42
6
1
1
8
1,957
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.modeling_speech_to_text_2.Speech2Text2DecoderWrapper
from ....utils import add_start_docstrings, logging, replace_return_docstrings @add_start_docstrings('The Speech2Text2 Model with a language modeling head. Can be used for summarization.', SPEECH_TO_TEXT_2_START_DOCSTRING) class Speech2Text2DecoderWrapper(Speech2Text2PreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = Speech2Text2Decoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs)
@add_start_docstrings('The Speech2Text2 Model with a language modeling head. Can be used for summarization.', SPEECH_TO_TEXT_2_START_DOCSTRING) class Speech2Text2DecoderWrapper(Speech2Text2PreTrainedModel): ''' This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. ''' def __init__(self, config): pass def forward(self, *args, **kwargs): pass
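The wrapper's only job is checkpoint key alignment: nesting the decoder under an attribute named `decoder` prefixes every parameter key with `decoder.`, so weights saved from a `model.decoder.*` layout load cleanly. A tiny demonstration with dummy modules (the names here are illustrative):

```python
from torch import nn

class Decoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.embed_tokens = nn.Embedding(10, 4)

class Wrapper(nn.Module):
    def __init__(self):
        super().__init__()
        self.decoder = Decoder()  # attribute name becomes the key prefix

print(list(Decoder().state_dict()))  # ['embed_tokens.weight']
print(list(Wrapper().state_dict()))  # ['decoder.embed_tokens.weight']
```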
4
1
3
0
3
0
1
0.67
1
2
1
0
2
1
2
132
12
2
6
4
3
4
6
4
3
1
3
0
2
1,958
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.modeling_speech_to_text_2.Speech2Text2ForCausalLM
from torch.nn import CrossEntropyLoss from typing import Optional, Union from ....utils import add_start_docstrings, logging, replace_return_docstrings from ....cache_utils import Cache from torch import nn from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions import torch @add_start_docstrings('The Speech2Text2 Decoder with a language modeling head. Can be used as the decoder part of [`EncoderDecoderModel`] and [`SpeechEncoderDecoder`].', SPEECH_TO_TEXT_2_START_DOCSTRING) class Speech2Text2ForCausalLM(Speech2Text2PreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = Speech2Text2DecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], CausalLMOutputWithCrossAttentions]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`Speech2Text2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import ( ... SpeechEncoderDecoderModel, ... Speech2Text2ForCausalLM, ... Wav2Vec2Model, ... Speech2Text2Config, ... Wav2Vec2Config, ... Wav2Vec2FeatureExtractor, ... Speech2Text2Tokenizer, ... ) >>> from datasets import load_dataset >>> feature_extractor = Wav2Vec2FeatureExtractor() >>> tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de") >>> encoder = Wav2Vec2Model(Wav2Vec2Config()) >>> decoder = Speech2Text2ForCausalLM(Speech2Text2Config()) >>> # init random speech2text model >>> model = SpeechEncoderDecoderModel(encoder=encoder, decoder=decoder) >>> model.config.pad_token_id = tokenizer.pad_token_id >>> model.config.decoder_start_token_id = tokenizer.bos_token_id >>> # pre-process inputs and labels >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor( ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ... 
) >>> input_values = inputs.input_values >>> decoder_input_ids = tokenizer(ds[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(inputs=input_values, labels=decoder_input_ids).loss >>> # backprop loss >>> loss.backward() # doctest: +IGNORE_RESULT ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values.get_seq_length() if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past
@add_start_docstrings('The Speech2Text2 Decoder with a language modeling head. Can be used as the decoder part of [`EncoderDecoderModel`] and [`SpeechEncoderDecoder`].', SPEECH_TO_TEXT_2_START_DOCSTRING) class Speech2Text2ForCausalLM(Speech2Text2PreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_decoder(self, decoder): pass def get_decoder(self): pass @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], CausalLMOutputWithCrossAttentions]: ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`Speech2Text2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. 
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import ( ... SpeechEncoderDecoderModel, ... Speech2Text2ForCausalLM, ... Wav2Vec2Model, ... Speech2Text2Config, ... Wav2Vec2Config, ... Wav2Vec2FeatureExtractor, ... Speech2Text2Tokenizer, ... ) >>> from datasets import load_dataset >>> feature_extractor = Wav2Vec2FeatureExtractor() >>> tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de") >>> encoder = Wav2Vec2Model(Wav2Vec2Config()) >>> decoder = Speech2Text2ForCausalLM(Speech2Text2Config()) >>> # init random speech2text model >>> model = SpeechEncoderDecoderModel(encoder=encoder, decoder=decoder) >>> model.config.pad_token_id = tokenizer.pad_token_id >>> model.config.decoder_start_token_id = tokenizer.bos_token_id >>> # pre-process inputs and labels >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor( ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ... ) >>> input_values = inputs.input_values >>> decoder_input_ids = tokenizer(ds[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(inputs=input_values, labels=decoder_input_ids).loss >>> # backprop loss >>> loss.backward() # doctest: +IGNORE_RESULT ```''' pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): pass @staticmethod def _reorder_cache(past_key_values, beam_idx): pass
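The trimming in `prepare_inputs_for_generation` feeds the decoder only the suffix of `input_ids` that the cache has not seen yet. A sketch of that slicing with the cache reduced to a plain `past_length` integer (an assumption for illustration; the real code queries `past_key_values.get_seq_length()`):

```python
import torch

# Sketch: keep only the not-yet-cached suffix of input_ids.
def trim_input_ids(input_ids: torch.Tensor, past_length: int) -> torch.Tensor:
    if input_ids.shape[1] > past_length:
        remove_prefix_length = past_length
    else:
        # edge case kept from the original: fall back to passing the last token
        remove_prefix_length = input_ids.shape[1] - 1
    return input_ids[:, remove_prefix_length:]

ids = torch.tensor([[5, 6, 7, 8]])
print(trim_input_ids(ids, past_length=3))  # tensor([[8]])
print(trim_input_ids(ids, past_length=9))  # tensor([[8]])
```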
12
1
22
3
10
9
2
0.94
1
6
2
0
9
2
10
140
234
41
100
42
70
94
51
23
40
7
3
2
20
1,959
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.modeling_speech_to_text_2.Speech2Text2PreTrainedModel
from torch import nn from .configuration_speech_to_text_2 import Speech2Text2Config from ....modeling_utils import PreTrainedModel class Speech2Text2PreTrainedModel(PreTrainedModel): config: Speech2Text2Config base_model_prefix = 'model' supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, Speech2Text2SinusoidalPositionalEmbedding): weight = module.get_embedding(*module.weight.shape, module.padding_idx) weight = nn.Parameter(weight, requires_grad=False) weight.detach_() module.weight = weight
class Speech2Text2PreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
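A sketch of the `nn.Embedding` branch of `_init_weights`: weights are drawn from a normal distribution with std `config.init_std`, then the padding row is zeroed so pad tokens embed to zero vectors (sizes here are illustrative):

```python
import torch
from torch import nn

init_std, padding_idx = 0.02, 1            # init_std matches the config default
emb = nn.Embedding(10, 4, padding_idx=padding_idx)
emb.weight.data.normal_(mean=0.0, std=init_std)   # normal init, as in _init_weights
emb.weight.data[padding_idx].zero_()              # zero out the padding row

print(emb.weight[padding_idx])   # tensor([0., 0., 0., 0.], ...)
print(emb.weight.std().item())   # roughly 0.02
```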
2
0
10
0
10
0
5
0
1
0
0
3
1
0
1
130
15
1
14
6
12
0
13
6
11
5
2
2
5
1,960
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.modeling_speech_to_text_2.Speech2Text2SinusoidalPositionalEmbedding
import torch import math from typing import Optional, Union from torch import nn class Speech2Text2SinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, 'weights'): emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.weights = nn.Parameter(emb_weights) self.weights.requires_grad = False self.weights.detach_() @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0): bsz, seq_len = input_ids.size() position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device) max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach() def create_position_ids_from_input_ids(self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int]=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: torch.Tensor Returns: torch.Tensor """ mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx
class Speech2Text2SinusoidalPositionalEmbedding(nn.Module): '''This module produces sinusoidal positional embeddings of any length.''' def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None): pass def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): pass @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): ''' Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". ''' pass @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0): pass def create_position_ids_from_input_ids(self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int]=0): ''' Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: torch.Tensor Returns: torch.Tensor ''' pass
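Both halves of this module are plain tensor math and can be checked standalone. The sketch below recomputes the sinusoidal table the way `get_embedding` does, and reproduces `create_position_ids_from_input_ids` for `past_key_values_length=0` (the pad id 1 is an illustrative choice):

```python
import math
import torch

# Recompute the sinusoidal table as get_embedding does (sketch).
def sinusoidal(num_embeddings, embedding_dim, padding_idx=None):
    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim).float() * -emb)
    emb = torch.arange(num_embeddings).float().unsqueeze(1) * emb.unsqueeze(0)
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
    if embedding_dim % 2 == 1:                       # zero-pad odd dimensions
        emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
    if padding_idx is not None:
        emb[padding_idx, :] = 0                      # padding row stays all-zero
    return emb

# Position ids: pads keep padding_idx, real tokens count from padding_idx + 1.
padding_idx = 1
input_ids = torch.tensor([[5, 6, padding_idx, padding_idx]])
mask = input_ids.ne(padding_idx).int()
positions = torch.cumsum(mask, dim=1).type_as(mask) * mask + padding_idx
print(positions)                                 # tensor([[2, 3, 1, 1]])
print(sinusoidal(8, 6, padding_idx=1).shape)     # torch.Size([8, 6])
```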
8
3
12
1
8
3
2
0.4
1
3
0
0
4
4
5
15
68
9
42
22
32
17
36
18
30
3
1
1
9
1,961
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/processing_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.processing_speech_to_text_2.Speech2Text2Processor
import warnings from ....processing_utils import ProcessorMixin from contextlib import contextmanager class Speech2Text2Processor(ProcessorMixin): """ Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into a single processor. [`Speech2Text2Processor`] offers all the functionalities of [`AutoFeatureExtractor`] and [`Speech2Text2Tokenizer`]. See the [`~Speech2Text2Processor.__call__`] and [`~Speech2Text2Processor.decode`] for more information. Args: feature_extractor (`AutoFeatureExtractor`): An instance of [`AutoFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Speech2Text2Tokenizer`): An instance of [`Speech2Text2Tokenizer`]. The tokenizer is a required input. """ feature_extractor_class = 'AutoFeatureExtractor' tokenizer_class = 'Speech2Text2Tokenizer' def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def __call__(self, *args, **kwargs): """ When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context [`~Speech2Text2Processor.as_target_processor`] this method forwards all its arguments to Speech2Text2Tokenizer's [`~Speech2Text2Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. """ if self._in_target_context_manager: return self.current_processor(*args, **kwargs) if 'raw_speech' in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.') audio = kwargs.pop('raw_speech') else: audio = kwargs.pop('audio', None) sampling_rate = kwargs.pop('sampling_rate', None) text = kwargs.pop('text', None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.') if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs['labels'] = encodings['input_ids'] return inputs @contextmanager def as_target_processor(self): """ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text2. """ warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.') self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.feature_extractor self._in_target_context_manager = False
class Speech2Text2Processor(ProcessorMixin): ''' Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into a single processor. [`Speech2Text2Processor`] offers all the functionalities of [`AutoFeatureExtractor`] and [`Speech2Text2Tokenizer`]. See the [`~Speech2Text2Processor.__call__`] and [`~Speech2Text2Processor.decode`] for more information. Args: feature_extractor (`AutoFeatureExtractor`): An instance of [`AutoFeatureExtractor`]. The feature extractor is a required input. tokenizer (`Speech2Text2Tokenizer`): An instance of [`Speech2Text2Tokenizer`]. The tokenizer is a required input. ''' def __init__(self, feature_extractor, tokenizer): pass def __call__(self, *args, **kwargs): ''' When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context [`~Speech2Text2Processor.as_target_processor`] this method forwards all its arguments to Speech2Text2Tokenizer's [`~Speech2Text2Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more information. ''' pass @contextmanager def as_target_processor(self): ''' Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Speech2Text2. ''' pass
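The deprecation warning in `as_target_processor` points at its replacement; side by side, as a sketch reusing the `processor` object from the example above:

# deprecated path: temporarily swaps the tokenizer in as `current_processor`
with processor.as_target_processor():
    labels = processor("hallo welt", return_tensors="pt").input_ids

# recommended path per the warning: pass `text=` to the regular __call__
labels = processor(text="hallo welt", return_tensors="pt").input_ids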
5
3
14
1
9
4
3
0.63
1
2
0
0
5
2
5
22
92
12
49
16
42
31
41
15
35
9
2
1
13
1,962
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/speech_to_text_2/tokenization_speech_to_text_2.py
transformers.models.deprecated.speech_to_text_2.tokenization_speech_to_text_2.Speech2Text2Tokenizer
import os from ....tokenization_utils import PreTrainedTokenizer from typing import Optional import json class Speech2Text2Tokenizer(PreTrainedTokenizer): """ Constructs a Speech2Text2Tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to the superclass for more information regarding such methods. Args: vocab_file (`str`): File containing the vocabulary. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sentence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sentence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. **kwargs Additional keyword arguments passed along to [`PreTrainedTokenizer`] """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, bos_token='<s>', pad_token='<pad>', eos_token='</s>', unk_token='<unk>', do_lower_case=False, merges_file=None, **kwargs): self.do_lower_case = do_lower_case with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.') self.bpe_ranks = None self.cache = None else: with open(merges_file, encoding='utf-8') as merges_handle: merges = merges_handle.read().split('\n')[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs) @property def vocab_size(self) -> int: return len(self.decoder) def get_vocab(self) -> dict: return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n ' + BPE_TOKEN_MERGES: word = '\n' + BPE_TOKEN_MERGES if word.endswith(BPE_TOKEN_MERGES): word = word.replace(BPE_TOKEN_MERGES, '') word = word.replace(' ', BPE_TOKEN_VOCAB) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" if self.bpe_ranks is None: raise ValueError('This tokenizer was instantiated without a `merges.txt` file, so that it can only be used for decoding, not for encoding. 
Make sure to provide a `merges.txt` file at instantiation to enable encoding.') if self.do_lower_case: text = text.lower() text = text.split() split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(' '))) return split_tokens def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) to an index (integer) using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) to a token (str) using the vocab.""" result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string(self, tokens: list[str]) -> str: """ Converts a list of output tokens into a single string. """ string = ' '.join(tokens) string = ''.join(string.split(BPE_TOKEN_VOCAB)) return string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merges_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 if self.bpe_ranks is None: return (vocab_file,) with open(merges_file, 'w', encoding='utf-8') as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merges_file)
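The `bpe` method above is the standard greedy merge loop: repeatedly fuse the adjacent symbol pair with the lowest rank in `bpe_ranks` until no ranked pair remains. A simplified, self-contained sketch of that loop (using a plain scan instead of the `word.index` bookkeeping in the original; same result):

def get_pairs(word):
    # all adjacent symbol pairs of a word given as a tuple of symbols
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe_merge(token, bpe_ranks):
    word = tuple(token)
    pairs = get_pairs(word)
    while pairs:
        # pick the pair that was learned earliest (lowest rank)
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float('inf')))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        pairs = get_pairs(word)
    return word

ranks = {('h', 'e'): 0, ('he', 'llo'): 1, ('l', 'l'): 2, ('ll', 'o'): 3}
print(bpe_merge('hello', ranks))  # ('hello',) after merging h+e, l+l, ll+o, he+llo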
class Speech2Text2Tokenizer(PreTrainedTokenizer): ''' Constructs a Speech2Text2Tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to the superclass for more information regarding such methods. Args: vocab_file (`str`): File containing the vocabulary. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sentence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sentence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. **kwargs Additional keyword arguments passed along to [`PreTrainedTokenizer`] ''' def __init__(self, vocab_file, bos_token='<s>', pad_token='<pad>', eos_token='</s>', unk_token='<unk>', do_lower_case=False, merges_file=None, **kwargs): pass @property def vocab_size(self) -> int: pass def get_vocab(self) -> dict: pass def bpe(self, token): pass def _tokenize(self, text): '''Tokenize a string.''' pass def _convert_token_to_id(self, token: str) -> int: '''Converts a token (str) to an index (integer) using the vocab.''' pass def _convert_id_to_token(self, index: int) -> str: '''Converts an index (integer) to a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens: list[str]) -> str: ''' Converts a list of output tokens into a single string. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
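Decoding reverses the continuation markers: `convert_tokens_to_string` joins the tokens with spaces and then strips the module-level `BPE_TOKEN_VOCAB` marker. That constant is defined outside the class and not shown here; the value '@@ ' below is an assumption based on the fairseq-style convention, traced by hand:

BPE_TOKEN_VOCAB = '@@ '  # assumed value of the module-level constant

tokens = ['hal@@', 'lo', 'wel@@', 't']
string = ' '.join(tokens)                        # 'hal@@ lo wel@@ t'
string = ''.join(string.split(BPE_TOKEN_VOCAB))  # 'hallo welt'
print(string)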
11
5
18
2
15
1
3
0.2
1
10
0
0
9
5
9
98
195
33
135
48
114
27
102
33
92
11
3
3
30
1,963
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tapex/tokenization_tapex.py
transformers.models.deprecated.tapex.tokenization_tapex.IndexedRowTableLinearize
class IndexedRowTableLinearize: """ FORMAT: col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ... """ def process_table(self, table_content: dict): """ Given a table, TableLinearize aims at converting it into a flattened sequence with special symbols. """ assert 'header' in table_content and 'rows' in table_content, self.PROMPT_MESSAGE table_str = self.process_header(table_content['header']) + ' ' for i, row_example in enumerate(table_content['rows']): table_str += self.process_row(row_example, row_index=i + 1) + ' ' return table_str.strip() def process_header(self, headers: list): """ Given a list of headers, TableLinearize aims at converting it into a flattened sequence with special symbols. """ return 'col : ' + ' | '.join(headers) def process_row(self, row: list, row_index: int): """ Given a row, TableLinearize aims at converting it into a flattened sequence with special symbols. """ row_str = '' row_cell_values = [] for cell_value in row: if isinstance(cell_value, int): row_cell_values.append(str(cell_value)) else: row_cell_values.append(cell_value) row_str += ' | '.join(row_cell_values) return 'row ' + str(row_index) + ' : ' + row_str
class IndexedRowTableLinearize: ''' FORMAT: col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ... ''' def process_table(self, table_content: dict): ''' Given a table, TableLinearize aims at converting it into a flattened sequence with special symbols. ''' pass def process_header(self, headers: list): ''' Given a list of headers, TableLinearize aims at converting it into a flattened sequence with special symbols. ''' pass def process_row(self, row: list, row_index: int): ''' Given a row, TableLinearize aims at converting it into a flattened sequence with special symbols. ''' pass
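A worked example of the FORMAT described above, with the expected output traced by hand from `process_table` (integer cells are stringified by `process_row`):

linearizer = IndexedRowTableLinearize()
table_content = {'header': ['year', 'city'], 'rows': [[2008, 'Beijing'], [2012, 'London']]}
print(linearizer.process_table(table_content))
# col : year | city row 1 : 2008 | Beijing row 2 : 2012 | London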
4
4
10
0
6
4
2
0.79
0
3
0
0
3
0
3
3
37
3
19
9
15
15
18
9
14
3
0
2
6
1,964
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tapex/tokenization_tapex.py
transformers.models.deprecated.tapex.tokenization_tapex.TapexTokenizer
import random from ....file_utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available import json from typing import Optional, Union import regex as re from ....tokenization_utils import AddedToken, PreTrainedTokenizer import os from ....tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, BatchEncoding, TextInput, TruncationStrategy class TapexTokenizer(PreTrainedTokenizer): """ Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following: sentence col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ... The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table will be duplicated for every query), a single query and multiple tables (in which case the query will be duplicated for every table), and multiple tables and queries. In other words, you can provide a batch of tables + questions to the tokenizer for instance to prepare them for the model. Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. 
This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (BART tokenizer detect beginning of words by the preceding space). max_cell_length (`int`, *optional*, defaults to 15): Maximum number of characters per cell when linearizing a table. If this number is exceeded, truncation takes place. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, do_lower_case=True, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, max_cell_length=15, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.do_lower_case = do_lower_case self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(vocab_file=vocab_file, merges_file=merges_file, do_lower_case=do_lower_case, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, max_cell_length=max_cell_length, **kwargs) self.max_cell_length = max_cell_length self.table_linearize = IndexedRowTableLinearize() def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A TAPEX sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Args: Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Args: Create a mask from the two sequences passed to be used in a sequence-pair classification task. TAPEX does not: make use of token type ids, therefore a list of zeros is returned. token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())): text = ' ' + text return (text, kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an 
index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__(self, table: Union['pd.DataFrame', list['pd.DataFrame']]=None, query: Optional[Union[TextInput, list[TextInput]]]=None, answer: Optional[Union[str, list[str]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several table-sequence pair(s). Args: table (`pd.DataFrame`, `list[pd.DataFrame]`): Table(s) containing tabular data. query (`str` or `list[str]`, *optional*): Sentence or batch of sentences related to one or more table(s) to be encoded. Note that the number of sentences must match the number of tables. answer (`str` or `list[str]`, *optional*): Optionally, the corresponding answer to the questions as supervision. 
""" if table is not None: return self.source_call_func(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) elif answer is not None: return self.target_call_func(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: raise ValueError('You need to provide either a `table` or an `answer`.') def source_call_func(self, table: Union['pd.DataFrame', list['pd.DataFrame']], query: Optional[Union[TextInput, list[TextInput]]]=None, answer: Optional[Union[str, list[str]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: valid_table = False valid_query = False if isinstance(table, pd.DataFrame): valid_table = True elif isinstance(table, (list, tuple)) and isinstance(table[0], pd.DataFrame): valid_table = True if query is None or isinstance(query, str): valid_query = True elif isinstance(query, (list, tuple)): if len(query) == 0 or isinstance(query[0], str): valid_query = True if not valid_table: raise ValueError('table input must of type `pd.DataFrame` (single example), `list[pd.DataFrame]` (batch of examples). ') if not valid_query: raise ValueError('query input must of type `str` (single example), `list[str]` (batch of examples). 
') is_batched = isinstance(table, (list, tuple)) or isinstance(query, (list, tuple)) if is_batched: return self.batch_encode_plus(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: return self.encode_plus(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus(self, table: Union['pd.DataFrame', list['pd.DataFrame']], query: Optional[list[TextInput]]=None, answer: Optional[list[str]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ <Tip warning={true}> This method is deprecated, `__call__` should be used instead. 
</Tip> """ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._batch_encode_plus(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _batch_encode_plus(self, table: Union['pd.DataFrame', list['pd.DataFrame']], query: Optional[list[TextInput]]=None, answer: Optional[list[str]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.') if isinstance(table, pd.DataFrame) and isinstance(query, (list, tuple)): table = [table] * len(query) if isinstance(table, (list, tuple)) and isinstance(query, str): query = [query] * len(table) batch_outputs = self._batch_prepare_for_model(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model(self, table: Union['pd.DataFrame', list['pd.DataFrame']], query: Optional[Union[TextInput, list[TextInput]]]=None, answer: Optional[Union[str, list[str]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding: """ This method adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) 
for overflowing tokens. """ batch_outputs = {} if answer is None: answer = [None] * len(table) for _table, _query, _answer in zip(table, query, answer): text = self.prepare_table_query(_table, _query, _answer, truncation_strategy=truncation_strategy, max_length=max_length) if self.do_lower_case: text = text.lower() tokens = self.tokenize(text) outputs = self.prepare_for_model(ids=self.convert_tokens_to_ids(tokens), add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING) def encode(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy]=None, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]: """ Prepare a table, a string and possible answer for the model. This method does not return token type IDs, attention masks, etc. which are necessary for the model to work correctly. Use this method if you want to build your processing on your own, otherwise refer to `__call__`. 
""" encoded_inputs = self.encode_plus(table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs) return encoded_inputs['input_ids'] @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._encode_plus(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _encode_plus(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. 
More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674') text = self.prepare_table_query(table, query, answer, truncation_strategy=truncation_strategy, max_length=max_length) if self.do_lower_case: text = text.lower() tokens = self.tokenize(text) return self.prepare_for_model(ids=self.convert_tokens_to_ids(tokens), add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose) def target_call_func(self, answer: Union[str, list[str]], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ The method tokenizes and prepares the answer label for the model. Args: answer (`str` or `list[str]`): Corresponding answer supervision to the queries for training the model. """ is_batched = isinstance(answer, (list, tuple)) if is_batched: return self.target_batch_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: return self.target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def target_batch_encode_plus(self, answer: list[str], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ Prepare answer strings for the model. Args: answer `list[str]`: Corresponding answer supervision to the queries for training the model. 
""" padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._target_batch_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _target_batch_encode_plus(self, answer: list[str], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: batch_outputs = {} for text in answer: if self.do_lower_case: text = text.lower() tokens = self.tokenize(text) outputs = self.prepare_for_model(ids=self.convert_tokens_to_ids(tokens), add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return BatchEncoding(batch_outputs) def target_encode(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy]=None, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]: """ Prepare the answer string for the model. This method does not return token type IDs, attention masks, etc. which are necessary for the model to work correctly. Use this method if you want to build your processing on your own, otherwise refer to `__call__`. 
Args: answer `str`: Corresponding answer supervision to the queries for training the model. """ encoded_outputs = self.target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs) return encoded_outputs['input_ids'] def target_encode_plus(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: """ Prepare an answer string for the model. Args: answer `str`: Corresponding answer supervision to the queries for training the model. """ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _target_encode_plus(self, answer: str, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. 
More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674') text = answer if self.do_lower_case: text = text.lower() tokens = self.tokenize(text) return self.prepare_for_model(ids=self.convert_tokens_to_ids(tokens), add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose) def prepare_table_query(self, table, query, answer=None, truncation_strategy: Union[str, TruncationStrategy, TapexTruncationStrategy]=None, max_length=None): """ This method can be used to linearize a table and add a corresponding query. Optionally, it also handles truncation of the table (cells). An answer can be provided for more precise truncation. """ if not table.empty: table_content = {'header': list(table.columns), 'rows': [list(row.values) for i, row in table.iterrows()]} self.truncate_table_cells(table_content, query, answer) if truncation_strategy == TapexTruncationStrategy.DROP_ROWS_TO_FIT: self.truncate_table_rows(table_content, query, answer, max_length=max_length) linear_table = self.table_linearize.process_table(table_content) else: linear_table = '' if linear_table == '': logger.warning('You provided an empty table, or all cells contain too many tokens (e.g., >= 1024 tokens). ' + f'Please carefully check the corresponding table with the query: {query}.') if query == '': logger.warning('You provided nothing to query with respect to the table.') separator = ' ' if query and linear_table else '' joint_input = query + separator + linear_table if query else linear_table return joint_input def truncate_table_cells(self, table_content: dict, question: str, answer: list): cell_mapping = {} for row in table_content['rows']: for i, cell in enumerate(row): truncate_cell = self.truncate_cell(cell) if truncate_cell is not None: cell_mapping[cell] = truncate_cell row[i] = truncate_cell if answer is not None: for i, case in enumerate(answer): if case in cell_mapping: answer[i] = cell_mapping[case] def truncate_cell(self, cell_value): if isinstance(cell_value, (int, float)): return cell_value if cell_value.strip() != '': try_tokens = self.tokenize(cell_value) if len(try_tokens) >= self.max_cell_length: retain_tokens = try_tokens[:self.max_cell_length] retain_cell_value = self.convert_tokens_to_string(retain_tokens) return retain_cell_value else: return None else: return cell_value def truncate_table_rows(self, table_content: dict, question: str, answer: Optional[Union[str, list[str]]]=None, max_length=None): """ Args: table_content: {"header": xxx, "rows": xxx, "id" (Optionally): xxx} question: natural language sentence answer: if for training, is the supervision; otherwise will be empty """ delete_ratio, remain_token_len = self.estimate_delete_ratio(table_content, question, max_length) self.delete_unrelated_rows(table_content, question, answer, delete_ratio) maximum_keep_rows = 0 for ind, row_example in enumerate(table_content['rows']): value_string = self.table_linearize.process_row(row_example, ind + 1) value_token_len = len(self.tokenize(value_string)) if value_token_len > remain_token_len: break remain_token_len -= value_token_len maximum_keep_rows += 1 del
table_content['rows'][maximum_keep_rows:] def estimate_delete_ratio(self, table_content: dict, question: str, max_length=None): if 'header' not in table_content or 'rows' not in table_content: raise ValueError("The table content should contain both 'header' and 'rows' keys.") question_tokens = self.tokenize(question, add_special_tokens=True) header_string = self.table_linearize.process_header(table_content['header']) header_tokens = self.tokenize(header_string, add_special_tokens=False) used_token_len = len(question_tokens) + len(header_tokens) remain_token_len = max_length - used_token_len value_string = '' for _, row_example in enumerate(table_content['rows']): value_string += self.table_linearize.process_row(row_example, 100) + ' ' value_token_len = len(self.tokenize(value_string)) if value_token_len < remain_token_len: return (0.0, remain_token_len) else: return (1.0 - remain_token_len / value_token_len, remain_token_len) def delete_unrelated_rows(self, table_content: dict, question: str, answer: list, delete_ratio: float): """ The argument answer is used only during training. """ truncated_unrelated_indices = [] related_indices = [] if answer is None or len(answer) == 0: answer_set = set() else: answer_set = {ans_ex.lower() for ans_ex in answer} if question is not None: answer_set.update(question.split()) question_set = set(question.strip('?!.,').split(' ')) row_max_len = len(table_content['rows']) for _row_idx, row in enumerate(table_content['rows']): lower_row = {str(cell).lower() for cell in row} if len(lower_row & answer_set) == 0 and len(lower_row & question_set) == 0: truncated_unrelated_indices.append(_row_idx) else: related_indices.extend([_row_idx - 2, _row_idx - 1, _row_idx, _row_idx + 1, _row_idx + 2]) truncated_unrelated_indices = [_row_idx for _row_idx in truncated_unrelated_indices if _row_idx not in related_indices] drop_items = min(len(truncated_unrelated_indices), int(len(table_content['rows']) * delete_ratio)) drop_row_indices = random.choices(truncated_unrelated_indices, k=drop_items) for _row_idx in reversed(range(row_max_len)): if _row_idx in drop_row_indices: del table_content['rows'][_row_idx] if 'id' in table_content and len(drop_row_indices) > 0: logger.warning('Delete {:.2f} rows in table {}'.format(len(drop_row_indices), table_content['id']))
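A usage sketch tying the pieces together: the query and the linearized table are joined into one flattened string, and answers go through the target path when no table is given. The checkpoint name is illustrative (microsoft/tapex-base is the commonly referenced one), and string-valued cells are used to sidestep the numeric-cell handling in `truncate_cell`:

import pandas as pd
from transformers import TapexTokenizer

# illustrative checkpoint, assumed to ship vocab.json and merges.txt
tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")

table = pd.DataFrame.from_dict({"year": ["2008", "2012"], "city": ["beijing", "london"]})
# input is linearized as: "<query> col : year | city row 1 : ... row 2 : ..."
encoding = tokenizer(table=table, query="where were the games held in 2012?", return_tensors="pt")
# answers alone take the target path via `answer`
labels = tokenizer(answer="london", return_tensors="pt").input_ids
# rows can be dropped to fit max_length via truncation="drop_rows_to_fit"
# (assumed string value of TapexTruncationStrategy.DROP_ROWS_TO_FIT)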
class TapexTokenizer(PreTrainedTokenizer): ''' Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following: sentence col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ... The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table will be duplicated for every query), a single query and multiple tables (in which case the query will be duplicated for every table), and multiple tables and queries. In other words, you can provide a batch of tables + questions to the tokenizer for instance to prepare them for the model. Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (BART tokenizer detect beginning of words by the preceding space). max_cell_length (`int`, *optional*, defaults to 15): Maximum number of characters per cell when linearizing a table. 
If this number is exceeded, truncation takes place. ''' def __init__(self, vocab_file, merges_file, do_lower_case=True, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, max_cell_length=15, **kwargs): pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A TAPEX sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Create a mask from the two sequences passed to be used in a sequence-pair classification task. TAPEX does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
''' pass def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): pass @property def vocab_size(self): pass def get_vocab(self): pass def bpe(self, token): pass def _tokenize(self, text): '''Tokenize a string.''' pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an index (integer) in a token (str) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__(self, table: Union['pd.DataFrame', list['pd.DataFrame']]=None, query: Optional[Union[TextInput, list[TextInput]]]=None, answer: Optional[Union[str, list[str]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: ''' Main method to tokenize and prepare for the model one or several table-sequence pair(s). Args: table (`pd.DataFrame`, `list[pd.DataFrame]`): Table(s) containing tabular data. query (`str` or `list[str]`, *optional*): Sentence or batch of sentences related to one or more table(s) to be encoded. Note that the number of sentences must match the number of tables. answer (`str` or `list[str]`, *optional*): Optionally, the corresponding answer to the questions as supervision. 
''' pass def source_call_func(self, table: Union['pd.DataFrame', list['pd.DataFrame']], query: Optional[Union[TextInput, list[TextInput]]]=None, answer: Optional[Union[str, list[str]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: pass @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus(self, table: Union['pd.DataFrame', list['pd.DataFrame']], query: Optional[list[TextInput]]=None, answer: Optional[list[str]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: ''' <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> ''' pass def _batch_encode_plus(self, table: Union['pd.DataFrame', list['pd.DataFrame']], query: Optional[list[TextInput]]=None, answer: Optional[list[str]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: pass @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model(self, table: Union['pd.DataFrame', list['pd.DataFrame']], query: Optional[Union[TextInput, list[TextInput]]]=None, answer: Optional[Union[str, list[str]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding: ''' This method adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. 
        '''
        pass

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
    def encode(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy]=None, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]:
        '''
        Prepare a table, a string and a possible answer for the model. This method does not return token type IDs,
        attention masks, etc. which are necessary for the model to work correctly. Use this method if you want to build
        your processing on your own, otherwise refer to `__call__`.
        '''
        pass

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def encode_plus(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        pass

    def _encode_plus(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        pass

    def target_call_func(self, answer: Union[str, list[str]], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        '''
        This method tokenizes and prepares the answer label for the model.

        Args:
            answer (`str` or `list[str]`):
                Corresponding answer supervision to the queries for training the model.
        '''
        pass

    def target_batch_encode_plus(self, answer: list[str], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        '''
        Prepare answer strings for the model.
        Args:
            answer (`list[str]`):
                Corresponding answer supervision to the queries for training the model.
        '''
        pass

    def _target_batch_encode_plus(self, answer: list[str], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        pass

    def target_encode(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy]=None, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> list[int]:
        '''
        Prepare the answer string for the model. This method does not return token type IDs, attention masks, etc.
        which are necessary for the model to work correctly. Use this method if you want to build your processing on
        your own, otherwise refer to `__call__`.

        Args:
            answer (`str`):
                Corresponding answer supervision to the queries for training the model.
        '''
        pass

    def target_encode_plus(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Optional[Union[bool, str]]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        '''
        Prepare an answer string for the model.

        Args:
            answer (`str`):
                Corresponding answer supervision to the queries for training the model.
        '''
        pass

    def _target_encode_plus(self, answer: str, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
        pass

    def prepare_table_query(self, table, query, answer=None, truncation_strategy: Union[str, TruncationStrategy, TapexTruncationStrategy]=None, max_length=None):
        '''
        This method can be used to linearize a table and add a corresponding query.

        Optionally, it also handles truncation of the table (cells).

        An answer can be provided for more precise truncation.
        '''
        pass

    def truncate_table_cells(self, table_content: dict, question: str, answer: list):
        pass

    def truncate_cell(self, cell_value):
        pass

    def truncate_table_rows(self, table_content: dict, question: str, answer: Optional[Union[str, list[str]]]=None, max_length=None):
        '''
        Args:
            table_content: {"header": xxx, "rows": xxx, "id" (optionally): xxx}
            question: natural language sentence
            answer: the supervision when training; otherwise empty
        '''
        pass

    def estimate_delete_ratio(self, table_content: dict, question: str, max_length=None):
        pass

    def delete_unrelated_rows(self, table_content: dict, question: str, answer: list, delete_ratio: float):
        '''
        The argument answer is used only during training.
        '''
        pass
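The class skeleton above describes the table flattening but elides the implementation. Below is a minimal, hedged sketch of the linearization format only; the helper name `linearize_table` and the unconditional lowercasing (mirroring the `do_lower_case=True` default) are assumptions for illustration, not the tokenizer's actual code.

```python
import pandas as pd

def linearize_table(table: pd.DataFrame, sentence: str = "") -> str:
    # Flatten a table into the format described in the docstring above:
    # "sentence col : col1 | col2 row 1 : val1 | val2 row 2 : ..."
    parts = [sentence] if sentence else []
    parts.append("col : " + " | ".join(str(c).lower() for c in table.columns))
    for i, (_, row) in enumerate(table.iterrows(), start=1):
        parts.append(f"row {i} : " + " | ".join(str(v).lower() for v in row))
    return " ".join(parts)

table = pd.DataFrame({"city": ["Paris", "Lyon"], "population": [2100000, 513000]})
print(linearize_table(table, "how many people live in paris"))
# how many people live in paris col : city | population row 1 : paris | 2100000 row 2 : lyon | 513000
```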
40
19
36
2
29
5
3
0.22
1
20
4
0
33
12
33
122
1,291
118
968
411
655
216
313
128
279
9
3
3
113
1,965
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tapex/tokenization_tapex.py
transformers.models.deprecated.tapex.tokenization_tapex.TapexTruncationStrategy
from ....file_utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available

class TapexTruncationStrategy(ExplicitEnum):
    """
    Possible values for the `truncation` argument in [`~TapexTokenizer.__call__`]. Useful for tab-completion in an IDE.
    """

    DROP_ROWS_TO_FIT = 'drop_rows_to_fit'
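A hedged usage sketch for the strategy above: the enum member (or its string value) is passed as the `truncation` argument of the tokenizer. The checkpoint name `microsoft/tapex-base` is an assumption here, and the deprecated tokenizer must still be importable from the installed transformers version.

```python
import pandas as pd
from transformers import TapexTokenizer  # deprecated; assumes it is still importable

tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
table = pd.DataFrame({"year": [2019, 2020], "wins": [3, 7]})
# TapexTruncationStrategy.DROP_ROWS_TO_FIT, or its string value, drops table
# rows until the linearized sequence fits within max_length:
encoding = tokenizer(table=table, query="how many wins in 2020?",
                     truncation="drop_rows_to_fit", max_length=64, return_tensors="pt")
print(encoding.input_ids.shape)
```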
class TapexTruncationStrategy(ExplicitEnum):
    '''
    Possible values for the `truncation` argument in [`~TapexTokenizer.__call__`]. Useful for tab-completion in an IDE.
    '''
    pass
1
1
0
0
0
0
0
1.5
1
0
0
0
0
0
0
0
6
1
2
2
1
3
2
2
1
0
1
0
0
1,966
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py
transformers.models.deprecated.trajectory_transformer.configuration_trajectory_transformer.TrajectoryTransformerConfig
from ....configuration_utils import PretrainedConfig

class TrajectoryTransformerConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`TrajectoryTransformerModel`]. It is used to
    instantiate a TrajectoryTransformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the
    TrajectoryTransformer
    [CarlCochet/trajectory-transformer-halfcheetah-medium-v2](https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 100):
            Vocabulary size of the TrajectoryTransformer model. Defines the number of different tokens that can be
            represented by the `trajectories` passed when calling [`TrajectoryTransformerModel`].
        action_weight (`int`, *optional*, defaults to 5):
            Weight of the action in the loss function.
        reward_weight (`int`, *optional*, defaults to 1):
            Weight of the reward in the loss function.
        value_weight (`int`, *optional*, defaults to 1):
            Weight of the value in the loss function.
        block_size (`int`, *optional*, defaults to 249):
            Size of the blocks in the trajectory transformer.
        action_dim (`int`, *optional*, defaults to 6):
            Dimension of the action space.
        observation_dim (`int`, *optional*, defaults to 17):
            Dimension of the observation space.
        transition_dim (`int`, *optional*, defaults to 25):
            Dimension of the transition space.
        n_layer (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_embd (`int`, *optional*, defaults to 128):
            Dimensionality of the embeddings and hidden states.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        kaiming_initializer_range (`float`, *optional*, defaults to 1):
            A coefficient scaling the negative slope of the kaiming initializer rectifier for EinLinear layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
Example: ```python >>> from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel >>> # Initializing a TrajectoryTransformer CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration >>> configuration = TrajectoryTransformerConfig() >>> # Initializing a model (with random weights) from the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration >>> model = TrajectoryTransformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'trajectory_transformer' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs): self.vocab_size = vocab_size self.action_weight = action_weight self.reward_weight = reward_weight self.value_weight = value_weight self.max_position_embeddings = max_position_embeddings self.block_size = block_size self.action_dim = action_dim self.observation_dim = observation_dim self.transition_dim = transition_dim self.learning_rate = learning_rate self.n_layer = n_layer self.n_head = n_head self.n_embd = n_embd self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.resid_pdrop = resid_pdrop self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.kaiming_initializer_range = kaiming_initializer_range self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
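Beyond the Example in the docstring, the `attribute_map` defined above aliases the generic configuration names onto the GPT-style ones. A small sketch; it assumes the deprecated class is still importable from the installed transformers version.

```python
from transformers import TrajectoryTransformerConfig  # deprecated; may require an older transformers

config = TrajectoryTransformerConfig(n_embd=256, n_head=8, n_layer=6)
# attribute_map resolves the generic names to the GPT-style attributes:
assert config.hidden_size == config.n_embd == 256
assert config.num_attention_heads == config.n_head == 8
assert config.num_hidden_layers == config.n_layer == 6
```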
class TrajectoryTransformerConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`TrajectoryTransformerModel`]. It is used to
    instantiate a TrajectoryTransformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the
    TrajectoryTransformer
    [CarlCochet/trajectory-transformer-halfcheetah-medium-v2](https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 100):
            Vocabulary size of the TrajectoryTransformer model. Defines the number of different tokens that can be
            represented by the `trajectories` passed when calling [`TrajectoryTransformerModel`].
        action_weight (`int`, *optional*, defaults to 5):
            Weight of the action in the loss function.
        reward_weight (`int`, *optional*, defaults to 1):
            Weight of the reward in the loss function.
        value_weight (`int`, *optional*, defaults to 1):
            Weight of the value in the loss function.
        block_size (`int`, *optional*, defaults to 249):
            Size of the blocks in the trajectory transformer.
        action_dim (`int`, *optional*, defaults to 6):
            Dimension of the action space.
        observation_dim (`int`, *optional*, defaults to 17):
            Dimension of the observation space.
        transition_dim (`int`, *optional*, defaults to 25):
            Dimension of the transition space.
        n_layer (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_embd (`int`, *optional*, defaults to 128):
            Dimensionality of the embeddings and hidden states.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        kaiming_initializer_range (`float`, *optional*, defaults to 1):
            A coefficient scaling the negative slope of the kaiming initializer rectifier for EinLinear layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
Example: ```python >>> from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel >>> # Initializing a TrajectoryTransformer CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration >>> configuration = TrajectoryTransformerConfig() >>> # Initializing a model (with random weights) from the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration >>> model = TrajectoryTransformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs): pass
2
1
48
0
48
0
1
1.14
1
1
0
0
1
20
1
33
129
9
56
51
28
64
26
25
24
1
2
0
1
1,967
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py
transformers.models.deprecated.trajectory_transformer.convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.Parser
import trajectory.utils as utils class Parser(utils.Parser): dataset: str = 'halfcheetah-medium-expert-v2' config: str = 'config.offline'
class Parser(utils.Parser): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
3
0
3
3
2
0
3
3
2
0
1
0
0
1,968
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
transformers.models.deprecated.trajectory_transformer.modeling_trajectory_transformer.Block
from ....modeling_layers import GradientCheckpointingLayer from torch import nn import torch from typing import Optional, Union class Block(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.ln1 = nn.LayerNorm(config.n_embd) self.ln2 = nn.LayerNorm(config.n_embd) self.attn = CausalSelfAttention(config) self.l1 = nn.Linear(config.n_embd, 4 * config.n_embd) self.act = nn.GELU() self.l2 = nn.Linear(4 * config.n_embd, config.n_embd) self.drop = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], layer_past: Optional[tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False): residual = hidden_states hidden_states = self.ln1(hidden_states) attn_outputs = self.attn(hidden_states, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln2(hidden_states) hidden_states = self.l1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.l2(hidden_states) hidden_states = residual + self.drop(hidden_states) if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs
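A hedged smoke test of the pre-LN residual block above. It assumes `Block` and its `CausalSelfAttention` dependency (shown in the next record) are both in scope; the `SimpleNamespace` stands in for the real config, and its field values are arbitrary.

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(n_embd=128, n_head=4, attn_pdrop=0.1, resid_pdrop=0.1,
                         block_size=249, observation_dim=17, action_dim=6)
block = Block(config)
hidden = torch.randn(2, 10, config.n_embd)  # (batch, seq_len, n_embd)
out = block(hidden)
print(out[0].shape)  # torch.Size([2, 10, 128]); the residual paths preserve the shape
```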
class Block(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], layer_past: Optional[tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False): pass
3
0
21
3
18
1
2
0.03
1
4
1
0
2
7
2
12
43
6
36
20
27
1
27
14
24
2
1
1
3
1,969
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
transformers.models.deprecated.trajectory_transformer.modeling_trajectory_transformer.CausalSelfAttention
import torch import math from typing import Optional, Union from torch.nn import functional as F from torch import nn class CausalSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.n_embd % config.n_head != 0: raise ValueError(f'n_head ({config.n_head}) should be a divisor of n_embd ({config.n_embd})') self.key = nn.Linear(config.n_embd, config.n_embd) self.query = nn.Linear(config.n_embd, config.n_embd) self.value = nn.Linear(config.n_embd, config.n_embd) self.attn_drop = nn.Dropout(config.attn_pdrop) self.resid_drop = nn.Dropout(config.resid_pdrop) self.proj = nn.Linear(config.n_embd, config.n_embd) self.register_buffer('mask', torch.tril(torch.ones(config.block_size, config.block_size)).view(1, 1, config.block_size, config.block_size), persistent=False) joined_dim = config.observation_dim + config.action_dim + 2 self.mask.squeeze()[:, joined_dim - 1::joined_dim] = 0 self.n_head = config.n_head def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], layer_past: Optional[tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False): batch_size, sequence_length, embedding_dim = hidden_states.size() key = self.key(hidden_states).view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head).transpose(1, 2) query = self.query(hidden_states).view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head).transpose(1, 2) value = self.value(hidden_states).view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head).transpose(1, 2) if layer_past is not None: past_key, past_value = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None attn_weights = torch.matmul(query, key.transpose(-2, -1)) * (1.0 / math.sqrt(key.size(-1))) attn_weights = attn_weights.masked_fill(self.mask[:, :, :sequence_length, :sequence_length] == 0, torch.finfo(attn_weights.dtype).min) attn_weights = F.softmax(attn_weights, dim=-1) self._attn_map = attn_weights.clone() attn_weights = self.attn_drop(attn_weights) output = torch.matmul(attn_weights, value) output = output.transpose(1, 2).contiguous().view(batch_size, sequence_length, embedding_dim) output = self.resid_drop(self.proj(output)) outputs = (output, present) if output_attentions: outputs += (attn_weights,) return outputs
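The `register_buffer` call above builds a lower-triangular causal mask and then zeroes every `joined_dim`-th column, so no token can attend to the value-prediction positions of earlier transitions (`joined_dim = observation_dim + action_dim + 2`). The pattern can be reproduced in isolation for a toy size:

```python
import torch

block_size, joined_dim = 8, 4  # toy sizes; the model uses observation_dim + action_dim + 2
mask = torch.tril(torch.ones(block_size, block_size))
mask[:, joined_dim - 1::joined_dim] = 0  # columns 3 and 7 are masked out entirely
print(mask)
```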
class CausalSelfAttention(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], layer_past: Optional[tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False): pass
3
0
46
8
33
6
3
0.18
1
4
0
0
2
8
2
12
94
16
66
27
57
12
39
21
36
4
1
1
6
1,970
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
transformers.models.deprecated.trajectory_transformer.modeling_trajectory_transformer.EinLinear
import math
from torch import nn
import torch

class EinLinear(nn.Module):

    def __init__(self, n_models, in_features, out_features, bias):
        super().__init__()
        self.n_models = n_models
        self.out_features = out_features
        self.in_features = in_features
        self.weight = nn.Parameter(torch.Tensor(n_models, out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(n_models, out_features))
        else:
            self.register_parameter('bias', None)

    def reset_parameters(self):
        for i in range(self.n_models):
            nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5))
            if self.bias is not None:
                fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i])
                bound = 1 / math.sqrt(fan_in)
                nn.init.uniform_(self.bias[i], -bound, bound)

    def forward(self, input):
        """
        Args:
            input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`):
                The input to the layer.
        """
        # Apply one linear map per model slot: output is (B, n_models, out_features).
        output = torch.einsum('eoi,bei->beo', self.weight, input)
        if self.bias is not None:
            raise RuntimeError('EinLinear does not support a bias term in forward')
        return output
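The einsum `'eoi,bei->beo'` in `forward` applies a separate linear map per model slot. A shape check against an explicit per-model matmul confirms the contraction:

```python
import torch

n_models, in_f, out_f, batch = 3, 5, 7, 2
weight = torch.randn(n_models, out_f, in_f)   # (n_models, out_features, in_features)
x = torch.randn(batch, n_models, in_f)        # (batch, n_models, in_features)
y = torch.einsum('eoi,bei->beo', weight, x)
assert y.shape == (batch, n_models, out_f)

# Equivalent per-model matrix multiplication:
y_ref = torch.stack([x[:, e] @ weight[e].T for e in range(n_models)], dim=1)
assert torch.allclose(y, y_ref, atol=1e-6)
```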
class EinLinear(nn.Module): def __init__(self, n_models, in_features, out_features, bias): pass def reset_parameters(self): pass def forward(self, input): ''' Args: input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`): The input to the layer. ''' pass
4
1
9
0
7
2
2
0.26
1
4
0
0
3
5
3
13
31
2
23
13
19
6
22
13
18
3
1
2
7
1,971
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
transformers.models.deprecated.trajectory_transformer.modeling_trajectory_transformer.TrajectoryTransformerModel
from torch import nn from typing import Optional, Union import numpy as np from ....cache_utils import Cache import torch from torch.nn import functional as F from ....utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings @add_start_docstrings('The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.', TRAJECTORY_TRANSFORMER_START_DOCSTRING) class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel): """the full GPT language model, with a context size of block_size""" def __init__(self, config): super().__init__(config) self.tok_emb = nn.Embedding(config.vocab_size * config.transition_dim + 1, config.n_embd) self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd)) self.drop = nn.Dropout(config.embd_pdrop) self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)]) self.ln_f = nn.LayerNorm(config.n_embd) self.head = EinLinear(config.transition_dim, config.n_embd, config.vocab_size + 1, bias=False) self.vocab_size = config.vocab_size self.stop_token = config.vocab_size * config.transition_dim self.block_size = config.block_size self.observation_dim = config.observation_dim self.action_dim = config.action_dim self.transition_dim = config.transition_dim self.embedding_dim = config.n_embd self.action_weight = config.action_weight self.reward_weight = config.reward_weight self.value_weight = config.value_weight self.gradient_checkpointing = False self.post_init() def get_block_size(self): return self.block_size def offset_tokens(self, trajectories): _, sequence_length = trajectories.shape n_states = int(np.ceil(sequence_length / self.transition_dim)) offsets = torch.arange(self.transition_dim) * self.vocab_size offsets = offsets.repeat(n_states).to(trajectories.device) offset_trajectories = trajectories + offsets[:sequence_length] offset_trajectories[trajectories == self.vocab_size] = self.stop_token return offset_trajectories def pad_to_full_observation(self, hidden_states): batch_size, sequence_length, _ = hidden_states.shape n_pad = (self.transition_dim - sequence_length % self.transition_dim) % self.transition_dim padding = torch.zeros(batch_size, n_pad, self.embedding_dim, device=hidden_states.device) hidden_states_pad = torch.cat([hidden_states, padding], dim=1) hidden_states_pad = hidden_states_pad.view(-1, self.transition_dim, self.embedding_dim) return (hidden_states_pad, n_pad) @add_start_docstrings_to_model_forward(TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC) def forward(self, trajectories: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, targets: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TrajectoryTransformerOutput]: """ Returns: Examples: ```python >>> from transformers import TrajectoryTransformerModel >>> import torch >>> model = TrajectoryTransformerModel.from_pretrained( ... "CarlCochet/trajectory-transformer-halfcheetah-medium-v2" ... 
)
        >>> import numpy as np
        >>> device = "cuda" if torch.cuda.is_available() else "cpu"
        >>> model.to(device)
        >>> model.eval()

        >>> observations_dim, action_dim, batch_size = 17, 6, 256
        >>> seq_length = observations_dim + action_dim + 1

        >>> trajectories = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(
        ...     device
        ... )
        >>> targets = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(device)

        >>> outputs = model(
        ...     trajectories,
        ...     targets=targets,
        ...     use_cache=True,
        ...     output_attentions=True,
        ...     output_hidden_states=True,
        ...     return_dict=True,
        ... )
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        if past_key_values is None:
            past_key_values = tuple([None] * len(self.blocks))
        batch_size, sequence_length = trajectories.size()
        if sequence_length > self.block_size:
            raise ValueError('Cannot forward, model block size is exhausted.')
        offset_trajectories = self.offset_tokens(trajectories)
        token_embeddings = self.tok_emb(offset_trajectories)
        position_embeddings = self.pos_emb[:, :sequence_length, :]
        hidden_states = self.drop(token_embeddings + position_embeddings)
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
                use_cache = False
        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.blocks, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            outputs = block(hidden_states, layer_past, use_cache, output_attentions)
            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
        hidden_state = self.ln_f(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        hidden_states_pad, n_pad = self.pad_to_full_observation(hidden_state)
        logits = self.head(hidden_states_pad)
        logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1)
        logits = logits[:, :sequence_length]
        if targets is not None:
            loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction='none')
            if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1:
                n_states = int(np.ceil(sequence_length / self.transition_dim))
                weights = torch.cat([torch.ones(self.observation_dim, device=trajectories.device), torch.ones(self.action_dim, device=trajectories.device) * self.action_weight, torch.ones(1, device=trajectories.device) * self.reward_weight, torch.ones(1, device=trajectories.device) * self.value_weight])
                weights = weights.repeat(n_states)
                weights = weights[1:].repeat(batch_size, 1)
                loss = loss * weights.view(-1)
            loss = (loss * attention_mask.view(-1)).mean()
        else:
            loss = None
        if not return_dict:
            return tuple((v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None))
        return TrajectoryTransformerOutput(loss=loss, logits=logits, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions)
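`offset_tokens` gives each position within a transition its own `vocab_size`-sized slice of the shared embedding table; ids equal to `vocab_size` are remapped to the stop token. The arithmetic in isolation:

```python
import torch

vocab_size, transition_dim = 100, 25
trajectories = torch.tensor([[3, 7, 99, 0]])
offsets = torch.arange(transition_dim) * vocab_size  # 0, 100, 200, 300, ...
print(trajectories + offsets[: trajectories.shape[1]])
# tensor([[  3, 107, 299, 300]])
```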
@add_start_docstrings('The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.', TRAJECTORY_TRANSFORMER_START_DOCSTRING)
class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel):
    '''the full GPT language model, with a context size of block_size'''

    def __init__(self, config):
        pass

    def get_block_size(self):
        pass

    def offset_tokens(self, trajectories):
        pass

    def pad_to_full_observation(self, hidden_states):
        pass

    @add_start_docstrings_to_model_forward(TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, trajectories: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, targets: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TrajectoryTransformerOutput]:
        '''
        Returns:

        Examples:

        ```python
        >>> from transformers import TrajectoryTransformerModel
        >>> import torch
        >>> import numpy as np

        >>> model = TrajectoryTransformerModel.from_pretrained(
        ...     "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
        ... )
        >>> device = "cuda" if torch.cuda.is_available() else "cpu"
        >>> model.to(device)
        >>> model.eval()

        >>> observations_dim, action_dim, batch_size = 17, 6, 256
        >>> seq_length = observations_dim + action_dim + 1

        >>> trajectories = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(
        ...     device
        ... )
        >>> targets = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(device)

        >>> outputs = model(
        ...     trajectories,
        ...     targets=targets,
        ...     use_cache=True,
        ...     output_attentions=True,
        ...     output_hidden_states=True,
        ...     return_dict=True,
        ... )
        ```
        '''
        pass
9
2
39
7
24
8
5
0.31
1
12
3
0
5
17
5
135
206
42
127
60
107
39
88
47
82
20
3
2
24
1,972
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
transformers.models.deprecated.trajectory_transformer.modeling_trajectory_transformer.TrajectoryTransformerOutput
import torch
from ....cache_utils import Cache
from typing import Optional, Union
from dataclasses import dataclass
from ....utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings

@dataclass
class TrajectoryTransformerOutput(ModelOutput):
    """
    Base class for the outputs of the trajectory transformer model.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple[tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
            sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the
            attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. GPT2Attentions weights after the attention softmax, used to compute the weighted average
            in the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
class TrajectoryTransformerOutput(ModelOutput):
    '''
    Base class for the outputs of the trajectory transformer model.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple[tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
            sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the
            attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. GPT2Attentions weights after the attention softmax, used to compute the weighted average
            in the self-attention heads.
    '''
    pass
2
1
0
0
0
0
0
3.33
1
0
0
0
0
0
0
0
28
2
6
6
5
20
6
6
5
0
1
0
0
1,973
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
transformers.models.deprecated.trajectory_transformer.modeling_trajectory_transformer.TrajectoryTransformerPreTrainedModel
from ....modeling_utils import PreTrainedModel from torch import nn from .configuration_trajectory_transformer import TrajectoryTransformerConfig import math class TrajectoryTransformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: TrajectoryTransformerConfig base_model_prefix = 'trajectory_transformer' main_input_name = 'trajectories' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, EinLinear): for i in range(module.n_models): nn.init.kaiming_uniform_(module.weight[i], a=math.sqrt(5) / self.config.kaiming_initializer_range) if module.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight[i]) bound = 1 / math.sqrt(fan_in) * self.config.initializer_range nn.init.uniform_(module.bias[i], -bound, bound)
class TrajectoryTransformerPreTrainedModel(PreTrainedModel): ''' An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. ''' def _init_weights(self, module): pass
2
1
15
0
15
0
7
0.19
1
2
1
1
1
0
1
130
27
2
21
10
19
4
19
10
17
7
2
3
7
1,974
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py
transformers.models.deprecated.transfo_xl.configuration_transfo_xl.TransfoXLConfig
from ....configuration_utils import PretrainedConfig

class TransfoXLConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`TransfoXLModel`] or a [`TFTransfoXLModel`]. It is
    used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the TransfoXL
    [transfo-xl/transfo-xl-wt103](https://huggingface.co/transfo-xl/transfo-xl-wt103) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 267735):
            Vocabulary size of the Transformer-XL model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`TransfoXLModel`] or [`TFTransfoXLModel`].
        cutoffs (`list[int]`, *optional*, defaults to `[20000, 40000, 200000]`):
            Cutoffs for the adaptive softmax.
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the model's hidden states.
        d_embed (`int`, *optional*, defaults to 1024):
            Dimensionality of the embeddings.
        n_head (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        d_head (`int`, *optional*, defaults to 64):
            Dimensionality of the model's heads.
        d_inner (`int`, *optional*, defaults to 4096):
            Inner dimension in FF.
        div_val (`int`, *optional*, defaults to 4):
            Dividend value for adaptive input and softmax.
        pre_lnorm (`boolean`, *optional*, defaults to `False`):
            Whether or not to apply LayerNorm to the input instead of the output in the blocks.
        n_layer (`int`, *optional*, defaults to 18):
            Number of hidden layers in the Transformer encoder.
        mem_len (`int`, *optional*, defaults to 1600):
            Length of the retained previous heads.
        clamp_len (`int`, *optional*, defaults to 1000):
            Use the same pos embeddings after clamp_len.
        same_length (`boolean`, *optional*, defaults to `True`):
            Whether or not to use the same attn length for all tokens.
        proj_share_all_but_first (`boolean`, *optional*, defaults to `True`):
            True to share all but first projs, False not to share.
        attn_type (`int`, *optional*, defaults to 0):
            Attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
        sample_softmax (`int`, *optional*, defaults to -1):
            Number of samples in the sampled softmax.
        adaptive (`boolean`, *optional*, defaults to `True`):
            Whether or not to use adaptive softmax.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        dropatt (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        untie_r (`boolean`, *optional*, defaults to `True`):
            Whether or not to untie relative position biases.
        init (`str`, *optional*, defaults to `"normal"`):
            Parameter initializer to use.
        init_range (`float`, *optional*, defaults to 0.01):
            Parameters initialized by U(-init_range, init_range).
        proj_init_std (`float`, *optional*, defaults to 0.01):
            Parameters initialized by N(0, proj_init_std).
        init_std (`float`, *optional*, defaults to 0.02):
            Parameters initialized by N(0, init_std).
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        eos_token_id (`int`, *optional*, defaults to 0):
            End of stream token id.
Examples: ```python >>> from transformers import TransfoXLConfig, TransfoXLModel >>> # Initializing a Transformer XL configuration >>> configuration = TransfoXLConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = TransfoXLModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'transfo-xl' keys_to_ignore_at_inference = ['mems'] attribute_map = {'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init='normal', init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-05, eos_token_id=0, **kwargs): self.vocab_size = vocab_size self.cutoffs = [] self.cutoffs.extend(cutoffs) if proj_share_all_but_first: self.tie_projs = [False] + [True] * len(self.cutoffs) else: self.tie_projs = [False] + [False] * len(self.cutoffs) self.d_model = d_model self.d_embed = d_embed self.d_head = d_head self.d_inner = d_inner self.div_val = div_val self.pre_lnorm = pre_lnorm self.n_layer = n_layer self.n_head = n_head self.mem_len = mem_len self.same_length = same_length self.attn_type = attn_type self.clamp_len = clamp_len self.sample_softmax = sample_softmax self.adaptive = adaptive self.dropout = dropout self.dropatt = dropatt self.untie_r = untie_r self.init = init self.init_range = init_range self.proj_init_std = proj_init_std self.init_std = init_std self.layer_norm_epsilon = layer_norm_epsilon super().__init__(eos_token_id=eos_token_id, **kwargs) @property def max_position_embeddings(self): logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.') return -1 @max_position_embeddings.setter def max_position_embeddings(self, value): raise NotImplementedError(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
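One behavior of `__init__` above worth spelling out: `proj_share_all_but_first` expands into the `tie_projs` list, sharing every adaptive-softmax projection except the first. A sketch, assuming the deprecated config is still importable:

```python
from transformers import TransfoXLConfig  # deprecated; may require an older transformers

config = TransfoXLConfig(cutoffs=[20000, 40000, 200000], proj_share_all_but_first=True)
print(config.cutoffs)    # [20000, 40000, 200000]
print(config.tie_projs)  # [False, True, True, True] -- first projection untied, rest shared
```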
class TransfoXLConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`TransfoXLModel`] or a [`TFTransfoXLModel`]. It is
    used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the TransfoXL
    [transfo-xl/transfo-xl-wt103](https://huggingface.co/transfo-xl/transfo-xl-wt103) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 267735):
            Vocabulary size of the Transformer-XL model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`TransfoXLModel`] or [`TFTransfoXLModel`].
        cutoffs (`list[int]`, *optional*, defaults to `[20000, 40000, 200000]`):
            Cutoffs for the adaptive softmax.
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the model's hidden states.
        d_embed (`int`, *optional*, defaults to 1024):
            Dimensionality of the embeddings.
        n_head (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        d_head (`int`, *optional*, defaults to 64):
            Dimensionality of the model's heads.
        d_inner (`int`, *optional*, defaults to 4096):
            Inner dimension in FF.
        div_val (`int`, *optional*, defaults to 4):
            Dividend value for adaptive input and softmax.
        pre_lnorm (`boolean`, *optional*, defaults to `False`):
            Whether or not to apply LayerNorm to the input instead of the output in the blocks.
        n_layer (`int`, *optional*, defaults to 18):
            Number of hidden layers in the Transformer encoder.
        mem_len (`int`, *optional*, defaults to 1600):
            Length of the retained previous heads.
        clamp_len (`int`, *optional*, defaults to 1000):
            Use the same pos embeddings after clamp_len.
        same_length (`boolean`, *optional*, defaults to `True`):
            Whether or not to use the same attn length for all tokens.
        proj_share_all_but_first (`boolean`, *optional*, defaults to `True`):
            True to share all but first projs, False not to share.
        attn_type (`int`, *optional*, defaults to 0):
            Attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
        sample_softmax (`int`, *optional*, defaults to -1):
            Number of samples in the sampled softmax.
        adaptive (`boolean`, *optional*, defaults to `True`):
            Whether or not to use adaptive softmax.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        dropatt (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        untie_r (`boolean`, *optional*, defaults to `True`):
            Whether or not to untie relative position biases.
        init (`str`, *optional*, defaults to `"normal"`):
            Parameter initializer to use.
        init_range (`float`, *optional*, defaults to 0.01):
            Parameters initialized by U(-init_range, init_range).
        proj_init_std (`float`, *optional*, defaults to 0.01):
            Parameters initialized by N(0, proj_init_std).
        init_std (`float`, *optional*, defaults to 0.02):
            Parameters initialized by N(0, init_std).
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        eos_token_id (`int`, *optional*, defaults to 0):
            End of stream token id.
    Examples:

    ```python
    >>> from transformers import TransfoXLConfig, TransfoXLModel

    >>> # Initializing a Transformer XL configuration
    >>> configuration = TransfoXLConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = TransfoXLModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init='normal', init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-05, eos_token_id=0, **kwargs):
        pass

    @property
    def max_position_embeddings(self):
        pass

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        pass
6
1
23
0
22
1
1
0.94
1
2
0
0
3
25
3
35
162
11
78
63
43
73
39
32
35
2
2
1
4
1,975
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.AdaptiveEmbedding
from torch import nn import torch class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False): super().__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)) if d_proj != d_embed: self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: for i in range(len(self.cutoffs)): l_idx, r_idx = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) d_emb_i = d_embed // div_val ** i self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) def forward(self, inp): if self.div_val == 1: embed = self.emb_layers[0](inp) if self.d_proj != self.d_embed: embed = nn.functional.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1) emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) for i in range(len(self.cutoffs)): l_idx, r_idx = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = nn.functional.linear(emb_i, self.emb_projs[i]) emb_flat.index_copy_(0, indices_i, emb_i) embed_shape = inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) return embed
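A hedged smoke test of `AdaptiveEmbedding` with `div_val > 1`, assuming the class above is in scope. Note that the `torch.FloatTensor(...)` projection parameters are allocated uninitialized; real values are set later by the model's weight initialization, so only the output shape is meaningful here.

```python
import torch

# cutoffs=[100, 500] with n_token=1000 gives clusters [0,100), [100,500), [500,1000)
# with embedding sizes 64, 32 and 16 (d_embed // div_val**i), all projected up to d_proj.
emb = AdaptiveEmbedding(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=2)
tokens = torch.tensor([[5, 250, 900]])  # one id from each cluster
out = emb(tokens)
print(out.shape)  # torch.Size([1, 3, 64])
```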
class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False): pass def forward(self, inp): pass
3
0
28
6
22
0
5
0
1
2
0
0
2
9
2
12
58
13
45
26
42
0
43
26
40
5
1
3
9
1,976
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.PositionalEmbedding
import torch from torch import nn class PositionalEmbedding(nn.Module): def __init__(self, demb): super().__init__() self.demb = demb inv_freq = 1 / 10000 ** (torch.arange(0.0, demb, 2.0) / demb) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.outer(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :]
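A quick sketch of the sinusoidal table above; Transformer-XL feeds positions in descending order (klen-1 down to 0), and `bsz` merely expands the singleton batch dimension.

import torch
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import PositionalEmbedding

pe = PositionalEmbedding(demb=8)
pos_seq = torch.arange(5, -1, -1.0)  # positions klen-1 ... 0
print(pe(pos_seq).shape)             # torch.Size([6, 1, 8])
print(pe(pos_seq, bsz=3).shape)      # torch.Size([6, 3, 8])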
class PositionalEmbedding(nn.Module): def __init__(self, demb): pass def forward(self, pos_seq, bsz=None): pass
3
0
8
2
6
0
2
0
1
1
0
0
2
1
2
12
17
4
13
7
10
0
12
7
9
2
1
1
3
1,977
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.PositionwiseFF
from torch import nn class PositionwiseFF(nn.Module): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-05): super().__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential(nn.Linear(d_model, d_inner), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout)) self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: core_out = self.CoreNet(self.layer_norm(inp)) output = core_out + inp else: core_out = self.CoreNet(inp) output = self.layer_norm(inp + core_out) return output
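A shape sketch for the feed-forward block above (illustrative sizes): the residual connection keeps the [seq_len, batch, d_model] layout unchanged, so the block can be stacked freely.

import torch
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import PositionwiseFF

ff = PositionwiseFF(d_model=16, d_inner=64, dropout=0.1, pre_lnorm=False)
x = torch.randn(10, 2, 16)  # [seq_len, batch, d_model]
print(ff(x).shape)          # torch.Size([10, 2, 16])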
class PositionwiseFF(nn.Module): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-05): pass def forward(self, inp): pass
3
0
17
4
11
2
2
0.17
1
1
0
0
2
6
2
12
35
8
23
11
20
4
16
11
13
2
1
1
3
1,978
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.RelPartialLearnableDecoderLayer
from torch import nn class RelPartialLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-05, **kwargs): super().__init__() self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm'), layer_norm_epsilon=layer_norm_epsilon) def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False): attn_outputs = self.dec_attn(dec_inp, r, attn_mask=dec_attn_mask, mems=mems, head_mask=head_mask, output_attentions=output_attentions) ff_output = self.pos_ff(attn_outputs[0]) outputs = [ff_output] + attn_outputs[1:] return outputs
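A sketch wiring the decoder layer above to the PositionalEmbedding from an earlier record. Sizes are illustrative, and the relative-position biases are created uninitialized (no `post_init` runs here), so only the output shape is checked.

import torch
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import (
    PositionalEmbedding,
    RelPartialLearnableDecoderLayer,
)

seq, bsz, d_model = 4, 2, 16
layer = RelPartialLearnableDecoderLayer(n_head=2, d_model=d_model, d_head=8,
                                        d_inner=32, dropout=0.0, pre_lnorm=False)
word = torch.randn(seq, bsz, d_model)                                # [seq_len, batch, d_model]
pos = PositionalEmbedding(d_model)(torch.arange(seq - 1, -1, -1.0))  # descending positions
outputs = layer(word, pos)                                           # no mask, no memory
print(outputs[0].shape)                                              # torch.Size([4, 2, 16])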
class RelPartialLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-05, **kwargs): pass def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False): pass
3
0
12
2
10
0
1
0
1
3
2
0
2
2
2
12
25
4
21
8
18
0
10
8
7
1
1
0
2
1,979
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.RelPartialLearnableMultiHeadAttn
import torch from torch import nn class RelPartialLearnableMultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False, r_r_bias=None, r_w_bias=None, layer_norm_epsilon=1e-05): super().__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon) self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm if r_r_bias is None or r_w_bias is None: self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) else: self.r_r_bias = r_r_bias self.r_w_bias = r_w_bias self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False) def _rel_shift(self, x): zero_pad_shape = (x.size(0), 1) + x.size()[2:] zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=1) x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:] x_padded = x_padded.view(*x_padded_shape) x = x_padded[1:].view_as(x) return x def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False): qlen, rlen, bsz = (w.size(0), r.size(0), w.size(1)) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) rw_head_q = w_head_q + self.r_w_bias AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) rr_head_q = w_head_q + self.r_r_bias BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) BD = self._rel_shift(BD) attn_score = AC + BD attn_score.mul_(self.scale) mask_value = torch.finfo(attn_score.dtype).min if attn_mask is not None and torch.sum(attn_mask).item(): attn_mask = attn_mask == 1 if attn_mask.dim() == 2: attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], mask_value).type_as(attn_score) elif attn_mask.dim() == 3: attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], mask_value).type_as(attn_score) attn_prob = nn.functional.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) if head_mask is not None: attn_prob = attn_prob * head_mask attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v)) attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: outputs = [w + attn_out] else: outputs = [self.layer_norm(w + attn_out)] if output_attentions: outputs.append(attn_prob) return outputs
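The `_rel_shift` helper is the heart of this block: one zero-pad plus a re-view converts scores indexed by relative distance into scores indexed by key position. A small sketch (illustrative sizes); entries above the diagonal are residue that the causal attention mask removes later.

import torch
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import RelPartialLearnableMultiHeadAttn

attn = RelPartialLearnableMultiHeadAttn(n_head=1, d_model=4, d_head=4, dropout=0.0)
x = torch.arange(9.0).view(3, 3, 1, 1)  # [qlen, relative_distance, bsz, n_head]
print(attn._rel_shift(x)[:, :, 0, 0])
# tensor([[2., 0., 3.],
#         [4., 5., 0.],
#         [6., 7., 8.]])  -> out[i, j] == x[i, j + (qlen - 1) - i] for j <= i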
class RelPartialLearnableMultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False, r_r_bias=None, r_w_bias=None, layer_norm_epsilon=1e-05): pass def _rel_shift(self, x): pass def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False): pass
4
0
44
10
31
6
4
0.2
1
1
0
0
3
14
3
13
136
32
94
49
79
19
75
38
71
10
1
2
13
1,980
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLForSequenceClassification
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch from torch import nn from typing import Optional, Union @add_start_docstrings('\n The Transformer-XL Model transformer with a sequence classification head on top (linear layer).\n\n [`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal\n models (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', TRANSFO_XL_START_DOCSTRING) class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = TransfoXLModel(config) self.score = nn.Linear(config.d_embed, self.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, mems: Optional[list[torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TransfoXLSequenceClassifierOutputWithPast]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] assert self.config.pad_token_id is not None or batch_size == 1, 'Cannot handle batch sizes > 1 if no padding token is defined.' if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[range(batch_size), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TransfoXLSequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
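A hedged end-to-end sketch with a tiny random-weight model (hypothetical sizes, no pretrained checkpoint). With batch size 1 no `pad_token_id` is needed, and the head pools the logits of the last token:

import torch
from transformers.models.deprecated.transfo_xl.configuration_transfo_xl import TransfoXLConfig
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification

config = TransfoXLConfig(vocab_size=100, cutoffs=[50], d_model=32, d_embed=32, n_head=2,
                         d_head=16, d_inner=64, n_layer=2, mem_len=8, num_labels=3)
model = TransfoXLForSequenceClassification(config)
input_ids = torch.randint(0, 100, (1, 6))      # batch_size 1: no pad token required
out = model(input_ids=input_ids, labels=torch.tensor([1]))
print(out.logits.shape, out.loss is not None)  # torch.Size([1, 3]) True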
null
6
1
48
4
41
4
8
0.09
1
6
2
0
2
3
2
139
104
8
88
26
69
8
45
15
42
15
3
3
16
1,981
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModel
from torch import nn from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax import warnings from typing import Optional, Union from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging import torch @add_start_docstrings('\n The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive\n input embeddings)\n ', TRANSFO_XL_START_DOCSTRING) class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): _tied_weights_keys = ['crit\\.out_projs\\.\\d+', 'crit\\.out_layers\\.\\d+\\.weight'] def __init__(self, config): super().__init__(config) self.transformer = TransfoXLModel(config) self.sample_softmax = config.sample_softmax self.trainer_compatible = getattr(config, 'trainer_compatible', False) if not self.trainer_compatible: warnings.warn('The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order to use that updated output, please specify `trainer_compatible=True` as your configuration attribute.', DeprecationWarning) assert self.sample_softmax <= 0, 'Sampling from the softmax is not implemented yet. Please look at issue: #3310: https://github.com/huggingface/transformers/issues/3310' self.crit = ProjectedAdaptiveLogSoftmax(config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val) self.post_init() def tie_weights(self): """ Run this to be sure output and input (adaptive) softmax weights are tied """ if self.config.tie_word_embeddings: for i in range(len(self.crit.out_layers)): self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i]) if self.config.tie_projs: for i, tie_proj in enumerate(self.config.tie_projs): if tie_proj and self.config.div_val == 1 and (self.config.d_model != self.config.d_embed): if self.config.torchscript: self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone()) else: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0] elif tie_proj and self.config.div_val != 1: if self.config.torchscript: self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone()) else: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i] def reset_memory_length(self, mem_len): self.transformer.reset_memory_length(mem_len) def init_mems(self, bsz): return self.transformer.init_mems(bsz) @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, mems: Optional[list[torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TransfoXLLMHeadModelOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None: bsz, tgt_len = (input_ids.size(0), input_ids.size(1)) elif inputs_embeds is not None: bsz, tgt_len = (inputs_embeds.size(0), inputs_embeds.size(1)) else: raise ValueError('You have to specify either input_ids or inputs_embeds') transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden = transformer_outputs[0] pred_hid = last_hidden[:, -tgt_len:] if labels is not None: miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100 if miss_valid_label: labels[0, 1] = self.config.eos_token_id softmax_output = self.crit(pred_hid, labels) prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else () if labels is not None: losses = softmax_output.view(bsz, tgt_len - 1) loss = losses[losses != 0].mean() else: losses, loss = (None, None) if not return_dict: if self.trainer_compatible: output = (prediction_scores, losses) if losses is not None else (prediction_scores,) output += transformer_outputs[1:] return (loss,) + output if loss is not None else output else: output = (prediction_scores, *transformer_outputs[1:]) output = (losses,) + output if losses is not None else output return output + (loss,) if loss is not None else output return TransfoXLLMHeadModelOutput(loss=loss, prediction_scores=prediction_scores, losses=losses, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def get_output_embeddings(self): """Double-check if you are using adaptive softmax.""" if self.sample_softmax > 0: return self.out_layer else: return self.crit.out_layers[-1] def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs): inputs = {} if past_key_values: inputs['mems'] = past_key_values inputs['input_ids'] = input_ids[:, -1].unsqueeze(-1) else: inputs['input_ids'] = input_ids return inputs def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer): new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer) self.crit.cutoffs = new_cutoffs self.crit.cutoff_ends = [0] + new_cutoffs self.crit.n_token = new_num_tokens @staticmethod def _reorder_cache(mems: list[torch.Tensor], beam_idx: torch.Tensor) -> list[torch.Tensor]: """ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every generation step. """ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
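A tiny random-weight sketch of the LM head above (illustrative sizes; `trainer_compatible=True` opts into the newer output ordering and silences the deprecation warning). Labels are shifted internally, so passing `labels=input_ids` is the usual pattern:

import torch
from transformers.models.deprecated.transfo_xl.configuration_transfo_xl import TransfoXLConfig
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import TransfoXLLMHeadModel

config = TransfoXLConfig(vocab_size=100, cutoffs=[50], d_model=32, d_embed=32, n_head=2,
                         d_head=16, d_inner=64, n_layer=2, mem_len=8,
                         trainer_compatible=True)
model = TransfoXLLMHeadModel(config)
input_ids = torch.randint(0, 100, (2, 6))
out = model(input_ids=input_ids, labels=input_ids)
print(out.loss.shape, out.losses.shape)  # torch.Size([]) torch.Size([2, 5])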
@add_start_docstrings('\n The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive\n input embeddings)\n ', TRANSFO_XL_START_DOCSTRING) class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): def __init__(self, config): pass def tie_weights(self): ''' Run this to be sure output and input (adaptive) softmax weights are tied ''' pass def reset_memory_length(self, mem_len): pass def init_mems(self, bsz): pass @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, mems: Optional[list[torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TransfoXLLMHeadModelOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` ''' pass def get_output_embeddings(self): '''Double-check if you are using adaptive softmax.''' pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs): pass def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer): pass @staticmethod def _reorder_cache(mems: list[torch.Tensor], beam_idx: torch.Tensor) -> list[torch.Tensor]: ''' This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every generation step. ''' pass
14
4
17
2
13
2
4
0.17
1
10
3
0
8
4
9
146
171
24
126
41
99
22
75
29
65
14
3
4
33
1,982
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput
import torch from dataclasses import dataclass from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from typing import Optional, Union @dataclass class TransfoXLLMHeadModelOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: losses (`torch.FloatTensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided): Language modeling losses (not reduced). prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax). mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided): Reduced language modeling loss. """ losses: Optional[torch.FloatTensor] = None prediction_scores: Optional[torch.FloatTensor] = None mems: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None loss: Optional[torch.FloatTensor] = None @property def logits(self): return self.prediction_scores
null
4
1
7
0
2
5
1
2.8
1
0
0
0
1
0
1
1
43
5
10
9
7
28
9
8
7
1
1
0
1
1,983
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLModel
from typing import Optional, Union from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from torch import nn import torch @add_start_docstrings('The bare Transformer-XL Model transformer outputting raw hidden-states without any specific head on top.', TRANSFO_XL_START_DOCSTRING) class TransfoXLModel(TransfoXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.n_token = config.vocab_size self.d_embed = config.d_embed self.d_model = config.d_model self.n_head = config.n_head self.d_head = config.d_head self.word_emb = AdaptiveEmbedding(config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val) self.drop = nn.Dropout(config.dropout) self.n_layer = config.n_layer self.mem_len = config.mem_len self.attn_type = config.attn_type if not config.untie_r: self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.layers = nn.ModuleList() if config.attn_type == 0: for i in range(config.n_layer): self.layers.append(RelPartialLearnableDecoderLayer(config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout, dropatt=config.dropatt, pre_lnorm=config.pre_lnorm, r_w_bias=None if config.untie_r else self.r_w_bias, r_r_bias=None if config.untie_r else self.r_r_bias, layer_norm_epsilon=config.layer_norm_epsilon)) else: raise NotImplementedError self.same_length = config.same_length self.clamp_len = config.clamp_len if self.attn_type == 0: self.pos_emb = PositionalEmbedding(self.d_model) else: raise NotImplementedError self.post_init() def get_input_embeddings(self): return self.word_emb def set_input_embeddings(self, new_embeddings): self.word_emb = new_embeddings def backward_compatible(self): self.sample_softmax = -1 def reset_memory_length(self, mem_len): self.mem_len = mem_len def _prune_heads(self, heads): logger.info('Head pruning is not implemented for Transformer-XL model') pass def init_mems(self, bsz): if self.mem_len > 0: mems = [] param = next(self.parameters()) for i in range(self.n_layer): empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device) mems.append(empty) return mems else: return None def _update_mems(self, hids, mems, mlen, qlen): if mems is None: return None assert len(hids) == len(mems), 'len(hids) != len(mems)' with torch.no_grad(): new_mems = [] end_idx = mlen + max(0, qlen) beg_idx = max(0, end_idx - self.mem_len) for i in range(len(hids)): cat = torch.cat([mems[i], hids[i]], dim=0) new_mems.append(cat[beg_idx:end_idx].detach()) return new_mems @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, mems: Optional[list[torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TransfoXLModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not
None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() qlen, bsz = input_ids.size() elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() qlen, bsz = (inputs_embeds.shape[0], inputs_embeds.shape[1]) else: raise ValueError('You have to specify either input_ids or inputs_embeds') if mems is None: mems = self.init_mems(bsz) if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask = [None] * self.n_layer if inputs_embeds is not None: word_emb = inputs_embeds else: word_emb = self.word_emb(input_ids) mlen = mems[0].size(0) if mems is not None else 0 klen = mlen + qlen if self.same_length: all_ones = word_emb.new_ones((qlen, klen), dtype=torch.bool) mask_len = klen - self.mem_len if mask_len > 0: mask_shift_len = qlen - mask_len else: mask_shift_len = qlen dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] else: dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.bool), diagonal=1 + mlen)[:, :, None] hids = [] attentions = [] if output_attentions else None if self.attn_type == 0: pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=torch.int64).to(word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb) pos_emb = self.drop(pos_emb) for i, layer in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] layer_outputs = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i, head_mask=head_mask[i], output_attentions=output_attentions) core_out = layer_outputs[0] if output_attentions: attentions.append(layer_outputs[1]) else: raise NotImplementedError core_out = self.drop(core_out) new_mems = self._update_mems(hids, mems, mlen, qlen) if output_hidden_states: hids.append(core_out) hids = tuple((t.transpose(0, 1).contiguous() for t in hids)) else: hids = None if output_attentions: attentions = tuple((t.permute(2, 3, 0, 1).contiguous() for t in attentions)) core_out = core_out.transpose(0, 1).contiguous() if not return_dict: return tuple((v for v in [core_out, new_mems, hids, attentions] if v is not None)) return TransfoXLModelOutput(last_hidden_state=core_out, mems=new_mems, hidden_states=hids, attentions=attentions)
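A sketch of the defining feature of the model above: hidden states from one segment are cached in `mems` and attended to by the next segment (tiny illustrative config, random weights):

import torch
from transformers.models.deprecated.transfo_xl.configuration_transfo_xl import TransfoXLConfig
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import TransfoXLModel

config = TransfoXLConfig(vocab_size=100, cutoffs=[50], d_model=32, d_embed=32, n_head=2,
                         d_head=16, d_inner=64, n_layer=2, mem_len=8)
model = TransfoXLModel(config).eval()
seg1 = torch.randint(0, 100, (1, 6))
seg2 = torch.randint(0, 100, (1, 6))
with torch.no_grad():
    out1 = model(input_ids=seg1)                  # mems start as zeros of length mem_len
    out2 = model(input_ids=seg2, mems=out1.mems)  # segment 2 attends to segment 1
print(out2.last_hidden_state.shape)               # torch.Size([1, 6, 32])
print(len(out2.mems), out2.mems[0].shape)         # 2 torch.Size([8, 1, 32])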
@add_start_docstrings('The bare Transformer-XL Model transformer outputting raw hidden-states without any specific head on top.', TRANSFO_XL_START_DOCSTRING) class TransfoXLModel(TransfoXLPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def backward_compatible(self): pass def reset_memory_length(self, mem_len): pass def _prune_heads(self, heads): pass def init_mems(self, bsz): pass def _update_mems(self, hids, mems, mlen, qlen): pass @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, mems: Optional[list[torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TransfoXLModelOutput]: pass
13
0
24
3
20
3
5
0.14
1
11
4
0
9
17
9
146
233
35
184
64
159
25
121
54
111
24
3
3
42
1,984
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging import torch from typing import Optional, Union from dataclasses import dataclass @dataclass class TransfoXLModelOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor mems: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
null
2
1
0
0
0
0
0
3.8
1
0
0
0
0
0
0
0
28
4
5
4
4
19
5
4
4
0
1
0
0
1,985
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLPreTrainedModel
from ....modeling_utils import PreTrainedModel from typing import Optional, Union from torch import nn from .configuration_transfo_xl import TransfoXLConfig class TransfoXLPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: TransfoXLConfig base_model_prefix = 'transformer' def _init_weight(self, weight): if self.config.init == 'uniform': nn.init.uniform_(weight, -self.config.init_range, self.config.init_range) elif self.config.init == 'normal': nn.init.normal_(weight, 0.0, self.config.init_std) def _init_bias(self, bias): nn.init.constant_(bias, 0.0) def _init_weights(self, m): """Initialize the weights.""" classname = m.__class__.__name__ if classname.find('Linear') != -1: if hasattr(m, 'weight') and m.weight is not None: self._init_weight(m.weight) if hasattr(m, 'bias') and m.bias is not None: self._init_bias(m.bias) elif classname.find('AdaptiveEmbedding') != -1: if hasattr(m, 'emb_projs'): for i in range(len(m.emb_projs)): if m.emb_projs[i] is not None: nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std) elif classname.find('Embedding') != -1: if hasattr(m, 'weight'): self._init_weight(m.weight) elif classname.find('ProjectedAdaptiveLogSoftmax') != -1: if hasattr(m, 'cluster_weight') and m.cluster_weight is not None: self._init_weight(m.cluster_weight) if hasattr(m, 'cluster_bias') and m.cluster_bias is not None: self._init_bias(m.cluster_bias) if hasattr(m, 'out_projs'): for i in range(len(m.out_projs)): if m.out_projs[i] is not None: nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std) elif classname.find('LayerNorm') != -1: if hasattr(m, 'weight'): nn.init.normal_(m.weight, 1.0, self.config.init_std) if hasattr(m, 'bias') and m.bias is not None: self._init_bias(m.bias) else: if hasattr(m, 'r_emb'): self._init_weight(m.r_emb) if hasattr(m, 'r_w_bias'): self._init_weight(m.r_w_bias) if hasattr(m, 'r_r_bias'): self._init_weight(m.r_r_bias) if hasattr(m, 'r_bias'): self._init_bias(m.r_bias) def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, layer: Optional[int]=-1): """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying weights embeddings afterwards if the model class has a *tie_weights()* method. Arguments: new_num_tokens: (*optional*) int: New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and just returns a pointer to the input tokens `torch.nn.Embedding` Module of the model. layer: (*optional*) int: Layer of the *AdaptiveEmbedding* where the resizing should be done. By default, the last layer will be resized. Be aware that when resizing other than the last layer, you have to ensure that the new token(s) in the tokenizer are at the corresponding position.
Return: `torch.nn.Embedding` Pointer to the input tokens Embedding Module of the model """ base_model = getattr(self, self.base_model_prefix, self) if new_num_tokens is None: return self.get_input_embeddings() new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer) assert new_num_tokens_layer > 0, 'The size of the new embedding layer cannot be 0 or less' model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer) self.config.vocab_size = new_num_tokens base_model.vocab_size = new_num_tokens base_model.n_token = new_num_tokens new_embedding_shapes = self._get_embedding_shapes() self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer) self.tie_weights() return model_embeds def _get_new_num_tokens_layer(self, new_num_tokens, layer): embeddings = self.get_input_embeddings() if layer == -1: layer = len(embeddings.emb_layers) - 1 assert 0 <= layer <= len(embeddings.emb_layers) - 1 new_num_tokens_layer = new_num_tokens - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]]) - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1:]]) return (new_num_tokens_layer, layer) def _get_embedding_shapes(self): embeddings = self.get_input_embeddings() return [emb.weight.shape[0] for emb in embeddings.emb_layers] def _resize_token_embeddings(self, new_num_tokens, layer=-1): embeddings = self.get_input_embeddings() if new_num_tokens is None: return embeddings new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens) embeddings.emb_layers[layer] = new_embeddings_layer self.set_input_embeddings(embeddings) return self.get_input_embeddings() def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer): embeddings = self.get_input_embeddings() for i in range(layer, len(embeddings.cutoffs)): embeddings.cutoffs[i] = sum(new_embedding_shapes[:i + 1]) embeddings.cutoff_ends = [0] + embeddings.cutoffs embeddings.n_token = new_num_tokens self.config.cutoffs = embeddings.cutoffs[:-1] return embeddings.cutoffs
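A sketch of the adaptive-embedding-aware resize above: growing the vocabulary by 10 enlarges the last bucket and updates the cutoffs (tiny illustrative config, random weights):

from transformers.models.deprecated.transfo_xl.configuration_transfo_xl import TransfoXLConfig
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import TransfoXLModel

config = TransfoXLConfig(vocab_size=100, cutoffs=[50], d_model=32, d_embed=32, n_head=2,
                         d_head=16, d_inner=64, n_layer=2, mem_len=8)
model = TransfoXLModel(config)
emb = model.resize_token_embeddings(110)     # layer=-1: resize the last adaptive bucket
print(model.config.vocab_size, emb.cutoffs)  # 110 [50, 110]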
class TransfoXLPreTrainedModel(PreTrainedModel): ''' An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. ''' def _init_weight(self, weight): pass def _init_bias(self, bias): pass def _init_weights(self, m): '''Initialize the weights.''' pass def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, layer: Optional[int]=-1): ''' Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying weights embeddings afterwards if the model class has a *tie_weights()* method. Arguments: new_num_tokens: (*optional*) int: New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and just returns a pointer to the input tokens `torch.nn.Embeddings` Module of the model. layer: (*optional*) int: Layer of the *AdaptiveEmbedding* where the resizing should be done. Per default the last layer will be resized. Be aware that when resizing other than the last layer, you have to ensure that the new token(s) in the tokenizer are at the corresponding position. Return: `torch.nn.Embeddings` Pointer to the input tokens Embeddings Module of the model ''' pass def _get_new_num_tokens_layer(self, new_num_tokens, layer): pass def _get_embedding_shapes(self): pass def _resize_token_embeddings(self, new_num_tokens, layer=-1): pass def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer): pass
9
3
15
2
11
2
5
0.24
1
2
0
3
8
0
8
137
138
24
93
25
84
22
83
25
74
23
2
4
36
1,986
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLSequenceClassifierOutputWithPast
from dataclasses import dataclass from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging import torch from typing import Optional, Union @dataclass class TransfoXLSequenceClassifierOutputWithPast(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (`list[torch.FloatTensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None mems: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
null
2
1
0
0
0
0
0
3.5
1
0
0
0
0
0
0
0
31
4
6
6
5
21
6
6
5
0
1
0
0
1,987
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py
transformers.models.deprecated.transfo_xl.modeling_transfo_xl_utilities.ProjectedAdaptiveLogSoftmax
from torch import nn import torch class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False): super().__init__() self.n_token = n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers = nn.ModuleList() self.out_projs = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: self.out_projs.append(None) self.out_layers.append(nn.Linear(d_embed, n_token)) else: for i in range(len(self.cutoffs)): l_idx, r_idx = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) d_emb_i = d_embed // div_val ** i self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx)) self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias, proj): if proj is None: logit = nn.functional.linear(hidden, weight, bias=bias) else: proj_hid = nn.functional.linear(hidden, proj.t().contiguous()) logit = nn.functional.linear(proj_hid, weight, bias=bias) return logit def forward(self, hidden, labels=None, keep_order=False): """ Params: hidden :: [len*bsz x d_proj] labels :: [len*bsz] Return: if labels is None: out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary else: out :: [(len-1)*bsz] Negative log likelihood. We could replace this implementation by the native PyTorch one if theirs had an option to set bias on all clusters in the native one. 
here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138 """ if labels is not None: hidden = hidden[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() hidden = hidden.view(-1, hidden.size(-1)) labels = labels.view(-1) if hidden.size(0) != labels.size(0): raise RuntimeError('Input and labels should have the same size in the batch dimension.') else: hidden = hidden.view(-1, hidden.size(-1)) if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]) if labels is not None: mask = labels != -100 out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device) out[mask] = -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1) else: out = nn.functional.log_softmax(logit, dim=-1) else: weights, biases = ([], []) for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i = torch.cat([bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = (weights[0], biases[0], self.out_projs[0]) head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = nn.functional.log_softmax(head_logit, dim=1) if labels is None: out = hidden.new_empty((head_logit.size(0), self.n_token)) else: out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx = (cutoff_values[i], cutoff_values[i + 1]) if labels is not None: mask_i = (labels >= l_idx) & (labels < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue target_i = labels.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) hidden_i = hidden.index_select(0, indices_i) else: hidden_i = hidden if i == 0: if labels is not None: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: weight_i, bias_i, proj_i = (weights[i], biases[i], self.out_projs[i]) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1) cluster_prob_idx = self.cutoffs[0] + i - 1 if labels is not None: logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i out[:, l_idx:r_idx] = logprob_i if labels is not None: if (hasattr(self, 'keep_order') and self.keep_order) or keep_order: out.index_copy_(0, indices_i, -logprob_i) else: out[offset:offset + logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return out def log_prob(self, hidden): """ Computes log probabilities for all \\\\(n\\_classes\\\\) From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py Args: hidden (Tensor): a minibatch of examples Returns: log-probabilities for each class \\\\(c\\\\) in range \\\\(0 <= c <= n\\_classes\\\\), where \\\\(n\\_classes\\\\) is a parameter passed to `AdaptiveLogSoftmaxWithLoss` constructor.
Shape: - Input: \\\\((N, in\\_features)\\\\) - Output: \\\\((N, n\\_classes)\\\\) """ if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]) return nn.functional.log_softmax(logit, dim=-1) else: weights, biases = ([], []) for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i = torch.cat([bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = (weights[0], biases[0], self.out_projs[0]) head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) out = hidden.new_empty((head_logit.size(0), self.n_token)) head_logprob = nn.functional.log_softmax(head_logit, dim=1) cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): start_idx, stop_idx = (cutoff_values[i], cutoff_values[i + 1]) if i == 0: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: weight_i, bias_i, proj_i = (weights[i], biases[i], self.out_projs[i]) tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i) tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1) cluster_prob_idx = self.cutoffs[0] + i - 1 logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i out[:, start_idx:stop_idx] = logprob_i return out
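A sketch of both entry points above with a toy two-bucket vocabulary (illustrative sizes, uninitialized weights): training-style negative log likelihood with labels, and full log-probabilities via `log_prob`:

import torch
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax

crit = ProjectedAdaptiveLogSoftmax(n_token=100, d_embed=16, d_proj=16, cutoffs=[50], div_val=1)
hidden = torch.randn(2, 5, 16)                  # [bsz, seq_len, d_proj]
labels = torch.randint(0, 100, (2, 5))
nll = crit(hidden, labels)                      # shifted internally: (seq_len - 1) * bsz values
print(nll.shape)                                # torch.Size([8])
print(crit.log_prob(torch.randn(3, 16)).shape)  # torch.Size([3, 100])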
class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False): pass def _compute_logit(self, hidden, weight, bias, proj): pass def forward(self, hidden, labels=None, keep_order=False): ''' Params: hidden :: [len*bsz x d_proj] labels :: [len*bsz] Return: if labels is None: out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary else: out :: [(len-1)*bsz] Negative log likelihood. We could replace this implementation by the native PyTorch one if theirs had an option to set bias on all clusters in the native one. here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138 ''' pass def log_prob(self, hidden): ''' Computes log probabilities for all \\(n\_classes\\) From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.p Args: hidden (Tensor): a minibatch of example Returns: log-probabilities of for each class \\(c\\) in range \\(0 <= c <= n\_classes\\), where \\(n\_classes\\) is a parameter passed to `AdaptiveLogSoftmaxWithLoss` constructor. Shape: - Input: \\((N, in\_features)\\) - Output: \\((N, n\_classes)\\) ''' pass
5
2
55
10
38
8
8
0.2
1
3
0
0
4
14
4
14
224
42
153
63
148
30
133
63
128
17
1
4
32
1,988
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
transformers.models.deprecated.transfo_xl.tokenization_transfo_xl.LMMultiFileIterator
import numpy as np class LMMultiFileIterator(LMShuffledIterator): def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None, shuffle=False): self.paths = paths self.vocab = vocab self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self, path): sents = self.vocab.encode_file(path, add_double_eos=True) if self.shuffle: np.random.shuffle(sents) sent_stream = iter(sents) return sent_stream def __iter__(self): if self.shuffle: np.random.shuffle(self.paths) for path in self.paths: sent_stream = self.get_sent_stream(path) for batch in self.stream_iterator(sent_stream): yield batch
class LMMultiFileIterator(LMShuffledIterator): def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None, shuffle=False): pass def get_sent_stream(self, path): pass def __iter__(self): pass
4
0
9
1
7
0
3
0.05
1
0
0
0
3
7
3
7
29
6
22
16
18
1
22
16
18
4
1
2
8
1,989
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
transformers.models.deprecated.transfo_xl.tokenization_transfo_xl.LMOrderedIterator
import numpy as np class LMOrderedIterator: def __init__(self, data, bsz, bptt, device='cpu', ext_len=None): """ data -- LongTensor -- the LongTensor is strictly ordered """ self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.n_step = data.size(0) // bsz data = data.narrow(0, 0, self.n_step * bsz) self.data = data.view(bsz, -1).t().contiguous().to(device) self.n_batch = (self.n_step + self.bptt - 1) // self.bptt def get_batch(self, i, bptt=None): if bptt is None: bptt = self.bptt seq_len = min(bptt, self.data.size(0) - 1 - i) end_idx = i + seq_len beg_idx = max(0, i - self.ext_len) data = self.data[beg_idx:end_idx] target = self.data[i + 1:i + 1 + seq_len] data_out = data.transpose(0, 1).contiguous().to(self.device) target_out = target.transpose(0, 1).contiguous().to(self.device) return (data_out, target_out, seq_len) def get_fixlen_iter(self, start=0): for i in range(start, self.data.size(0) - 1, self.bptt): yield self.get_batch(i) def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3): max_len = self.bptt + max_deviation * std i = start while True: bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0 bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std)))) data, target, seq_len = self.get_batch(i, bptt) i += seq_len yield (data, target, seq_len) if i >= self.data.size(0) - 2: break def __iter__(self): return self.get_fixlen_iter()
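A toy run of the ordered iterator above: the stream is cut into `bsz` parallel tracks and yielded in `bptt`-sized chunks, with targets shifted by one token.

import torch
from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import LMOrderedIterator

data = torch.arange(100)                         # a strictly ordered token stream
it = LMOrderedIterator(data, bsz=4, bptt=10)
inp, tgt, seq_len = next(iter(it))
print(inp.shape, tgt.shape, seq_len)             # torch.Size([4, 10]) torch.Size([4, 10]) 10
print(inp[0, :5].tolist(), tgt[0, :5].tolist())  # [0, 1, 2, 3, 4] [1, 2, 3, 4, 5]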
class LMOrderedIterator: def __init__(self, data, bsz, bptt, device='cpu', ext_len=None): ''' data -- LongTensor -- the LongTensor is strictly ordered ''' pass def get_batch(self, i, bptt=None): pass def get_fixlen_iter(self, start=0): pass def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3): pass def __iter__(self): pass
6
1
10
2
7
1
2
0.19
0
2
0
0
5
7
5
5
57
13
37
25
31
7
37
25
31
4
0
2
11
1,990
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
transformers.models.deprecated.transfo_xl.tokenization_transfo_xl.LMShuffledIterator
import numpy as np from ....utils import cached_file, check_torch_load_is_safe, is_sacremoses_available, is_torch_available, logging, requires_backends, strtobool, torch_only_method class LMShuffledIterator: def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False): """ data -- list[LongTensor] -- there is no order among the LongTensors """ self.data = data self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self): epoch_indices = np.random.permutation(len(self.data)) if self.shuffle else np.array(range(len(self.data))) for idx in epoch_indices: yield self.data[idx] @torch_only_method def stream_iterator(self, sent_stream): streams = [None] * self.bsz data = torch.LongTensor(self.bptt, self.bsz) target = torch.LongTensor(self.bptt, self.bsz) n_retain = 0 while True: data[n_retain:].fill_(-1) target.fill_(-1) valid_batch = True for i in range(self.bsz): n_filled = 0 try: while n_filled < self.bptt: if streams[i] is None or len(streams[i]) <= 1: streams[i] = next(sent_stream) n_new = min(len(streams[i]) - 1, self.bptt - n_filled) data[n_retain + n_filled:n_retain + n_filled + n_new, i] = streams[i][:n_new] target[n_filled:n_filled + n_new, i] = streams[i][1:n_new + 1] streams[i] = streams[i][n_new:] n_filled += n_new except StopIteration: valid_batch = False break if not valid_batch: return data_out = data.transpose(0, 1).contiguous().to(self.device) target_out = target.transpose(0, 1).contiguous().to(self.device) yield (data_out, target_out, self.bptt) n_retain = min(data.size(0), self.ext_len) if n_retain > 0: data[:n_retain] = data[-n_retain:] data.resize_(n_retain + self.bptt, data.size(1)) def __iter__(self): sent_stream = self.get_sent_stream() for batch in self.stream_iterator(sent_stream): yield batch
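A toy run of the shuffled iterator above: sentences are packed into `bsz` parallel streams, and iteration stops once a full batch can no longer be filled (here, two batches are yielded).

import torch
from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import LMShuffledIterator

# Three "sentences" of token ids; with shuffle=False they are consumed in order.
sents = [torch.arange(8), torch.arange(20, 30), torch.arange(40, 45)]
it = LMShuffledIterator(sents, bsz=2, bptt=4, shuffle=False)
for inp, tgt, bptt in it:
    print(inp.shape, tgt.shape, bptt)  # torch.Size([2, 4]) torch.Size([2, 4]) 4 (printed twice)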
class LMShuffledIterator: def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False): ''' data -- list[LongTensor] -- there is no order among the LongTensors ''' pass def get_sent_stream(self): pass @torch_only_method def stream_iterator(self, sent_stream): pass def __iter__(self): pass
6
1
18
3
12
3
4
0.22
0
2
0
1
4
6
4
4
76
16
49
26
43
11
48
25
43
8
0
5
15
1,991
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
transformers.models.deprecated.transfo_xl.tokenization_transfo_xl.TransfoXLCorpus
import os from ....utils import cached_file, check_torch_load_is_safe, is_sacremoses_available, is_torch_available, logging, requires_backends, strtobool, torch_only_method import glob import torch class TransfoXLCorpus: @classmethod @torch_only_method def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a pre-processed corpus. """ vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) is_local = os.path.isdir(pretrained_model_name_or_path) try: resolved_corpus_file = cached_file(pretrained_model_name_or_path, CORPUS_NAME, cache_dir=cache_dir) except OSError: logger.error(f"Corpus '{pretrained_model_name_or_path}' was not found in corpus list ({', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys())}). We assumed '{pretrained_model_name_or_path}' was a path or url but couldn't find files {CORPUS_NAME} at this path or url.") return None if is_local: logger.info(f'loading corpus file {resolved_corpus_file}') else: logger.info(f'loading corpus file {CORPUS_NAME} from cache at {resolved_corpus_file}') corpus = cls(*inputs, **kwargs) check_torch_load_is_safe() corpus_dict = torch.load(resolved_corpus_file, weights_only=True) for key, value in corpus_dict.items(): corpus.__dict__[key] = value corpus.vocab = vocab if corpus.train is not None: corpus.train = torch.tensor(corpus.train, dtype=torch.long) if corpus.valid is not None: corpus.valid = torch.tensor(corpus.valid, dtype=torch.long) if corpus.test is not None: corpus.test = torch.tensor(corpus.test, dtype=torch.long) return corpus def __init__(self, *args, **kwargs): self.vocab = TransfoXLTokenizer(*args, **kwargs) self.dataset = None self.train = None self.valid = None self.test = None def build_corpus(self, path, dataset): self.dataset = dataset if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']: self.vocab.count_file(os.path.join(path, 'train.txt')) self.vocab.count_file(os.path.join(path, 'valid.txt')) self.vocab.count_file(os.path.join(path, 'test.txt')) elif self.dataset == 'wt103': self.vocab.count_file(os.path.join(path, 'train.txt')) elif self.dataset == 'lm1b': train_path_pattern = os.path.join(path, '1-billion-word-language-modeling-benchmark-r13output', 'training-monolingual.tokenized.shuffled', 'news.en-*') train_paths = glob.glob(train_path_pattern) self.vocab.build_vocab() if self.dataset in ['ptb', 'wt2', 'wt103']: self.train = self.vocab.encode_file(os.path.join(path, 'train.txt'), ordered=True) self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'), ordered=True) self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'), ordered=True) elif self.dataset in ['enwik8', 'text8']: self.train = self.vocab.encode_file(os.path.join(path, 'train.txt'), ordered=True, add_eos=False) self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'), ordered=True, add_eos=False) self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'), ordered=True, add_eos=False) elif self.dataset == 'lm1b': self.train = train_paths self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True) self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True) def get_iterator(self, split, *args, **kwargs): if split == 'train': if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(self.train, *args, **kwargs) elif self.dataset == 'lm1b': kwargs['shuffle'] = True data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs) 
elif split in ['valid', 'test']: data = self.valid if split == 'valid' else self.test if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(data, *args, **kwargs) elif self.dataset == 'lm1b': data_iter = LMShuffledIterator(data, *args, **kwargs) else: data_iter = None raise ValueError(f'Split not recognized: {split}') return data_iter
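Not part of the extracted record - a minimal end-to-end sketch for the class above. The directory path is made up and must contain `train.txt`/`valid.txt`/`test.txt`; for the `'wt103'` dataset the train split is served by the `LMOrderedIterator` shown earlier in this file:
```python
corpus = TransfoXLCorpus()
corpus.build_corpus("/path/to/wikitext-103", "wt103")  # counts, builds vocab, encodes splits

train_iter = corpus.get_iterator("train", bsz=32, bptt=128)  # dispatches to LMOrderedIterator
data, target, seq_len = next(iter(train_iter))  # (32, 128) token ids plus shifted targets
```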
class TransfoXLCorpus: @classmethod @torch_only_method def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): ''' Instantiate a pre-processed corpus. ''' pass def __init__(self, *args, **kwargs): pass def build_corpus(self, path, dataset): pass def get_iterator(self, split, *args, **kwargs): pass
7
1
23
1
20
2
6
0.07
0
5
4
0
3
5
4
4
97
8
83
21
76
6
63
20
58
8
0
2
23
1,992
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
transformers.models.deprecated.transfo_xl.tokenization_transfo_xl.TransfoXLTokenizer
import re from ....utils import cached_file, check_torch_load_is_safe, is_sacremoses_available, is_torch_available, logging, requires_backends, strtobool, torch_only_method import os import pickle import torch from collections import Counter, OrderedDict from ....tokenization_utils import PreTrainedTokenizer from typing import Optional class TransfoXLTokenizer(PreTrainedTokenizer): """ Construct a Transformer-XL tokenizer adapted from Vocab class in [the original code](https://github.com/kimiyoung/transformer-xl). The Transformer-XL tokenizer is a word-level tokenizer (no sub-word tokenization). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: special (`list[str]`, *optional*): A list of special tokens (to be treated by the original implementation of this tokenizer). min_freq (`int`, *optional*, defaults to 0): The minimum number of times a token has to be present in order to be kept in the vocabulary (otherwise it will be mapped to `unk_token`). max_size (`int`, *optional*): The maximum size of the vocabulary. If left unset, it will default to the size of the vocabulary found after excluding the tokens according to the `min_freq` rule. lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. delimiter (`str`, *optional*): The delimiter used between tokens. vocab_file (`str`, *optional*): File containing the vocabulary (from the original implementation). pretrained_vocab_file (`str`, *optional*): File containing the vocabulary as saved with the `save_pretrained()` method. never_split (`list[str]`, *optional*): List of tokens that should never be split. If no list is specified, will simply use the existing special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. eos_token (`str`, *optional*, defaults to `"<eos>"`): The end of sequence token. additional_special_tokens (`list[str]`, *optional*, defaults to `['<formula>']`): A list of additional special tokens (for the HuggingFace functionality). language (`str`, *optional*, defaults to `"en"`): The language of this tokenizer (used for moses preprocessing). """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids'] def __init__(self, special=None, min_freq=0, max_size=None, lower_case=False, delimiter=None, vocab_file=None, pretrained_vocab_file: Optional[str]=None, never_split=None, unk_token='<unk>', eos_token='<eos>', additional_special_tokens=['<formula>'], language='en', **kwargs): logger.error("`TransfoXL` was deprecated due to security issues linked to `pickle.load` in `TransfoXLTokenizer`. 
See more details on this model's documentation page: `https://github.com/huggingface/transformers/blob/main/docs/source/en/model_doc/transfo-xl.md`.") requires_backends(self, 'sacremoses') if special is None: special = [] self.counter = Counter() self.special = special self.min_freq = min_freq self.max_size = max_size self.lower_case = lower_case self.delimiter = delimiter self.vocab_file = vocab_file self.punctuation_symbols = '!"#$%&()*+,-./\\:;<=>?@[\\]^_`{|}~' self.punction_without_space_before_pattern = re.compile(f'[^\\s][{self.punctuation_symbols}]') self.punctuation_with_space_around_pattern = self._compile_space_around_punctuation_pattern() self.language = language self.moses_punct_normalizer = sm.MosesPunctNormalizer(language) self.moses_tokenizer = sm.MosesTokenizer(language) self.moses_detokenizer = sm.MosesDetokenizer(language) self.idx2sym = [] self.sym2idx = OrderedDict() try: vocab_dict = None if pretrained_vocab_file is not None: if not strtobool(os.environ.get('TRUST_REMOTE_CODE', 'False')): raise ValueError("This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially malicious. It's recommended to never unpickle data that could have come from an untrusted source, or that could have been tampered with. If you already verified the pickle data and decided to use it, you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it.") with open(pretrained_vocab_file, 'rb') as f: vocab_dict = pickle.load(f) if isinstance(vocab_dict, int): if not is_torch_available(): raise ImportError('Not trying to load dict with PyTorch as you need to install pytorch to load from a PyTorch pretrained vocabulary, or activate it with environment variables USE_TORCH=1 and USE_TF=0.') check_torch_load_is_safe() vocab_dict = torch.load(pretrained_vocab_file, weights_only=True) if vocab_dict is not None: for key, value in vocab_dict.items(): if key not in self.__dict__ or key in ['sym2idx', 'idx2sym']: self.__dict__[key] = value elif vocab_file is not None: self.build_vocab() except Exception as e: raise ValueError(f'Unable to parse file {pretrained_vocab_file}. Unknown format. 
If you tried to load a model saved through TransfoXLTokenizerFast, please note they are not compatible.') from e if vocab_file is not None: self.build_vocab() super().__init__(special=special, min_freq=min_freq, max_size=max_size, lower_case=lower_case, delimiter=delimiter, vocab_file=vocab_file, pretrained_vocab_file=pretrained_vocab_file, never_split=never_split, unk_token=unk_token, eos_token=eos_token, additional_special_tokens=additional_special_tokens, language=language, **kwargs) if never_split is None: never_split = self.all_special_tokens self.never_split = never_split @property def do_lower_case(self): return self.lower_case def _compile_space_around_punctuation_pattern(self): look_ahead_for_special_token = f'(?=[{self.punctuation_symbols}])' look_ahead_to_match_all_except_space = '(?=[^\\s])' return re.compile('' + look_ahead_for_special_token + look_ahead_to_match_all_except_space) def count_file(self, path, verbose=False, add_eos=False): if verbose: logger.info(f'counting file {path} ...') assert os.path.exists(path), f'Input file {path} not found' sents = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and (idx % 500000 == 0): logger.info(f' line {idx}') symbols = self.tokenize(line, add_eos=add_eos) self.counter.update(symbols) sents.append(symbols) return sents def count_sents(self, sents, verbose=False): """ sents : a list of sentences, each a list of tokenized symbols """ if verbose: logger.info(f'counting {len(sents)} sents ...') for idx, symbols in enumerate(sents): if verbose and idx > 0 and (idx % 500000 == 0): logger.info(f' line {idx}') self.counter.update(symbols) def _build_from_file(self, vocab_file): self.idx2sym = [] self.sym2idx = OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as f: for line in f: symb = line.strip().split()[0] self.add_symbol(symb) if '<UNK>' in self.sym2idx: self.unk_idx = self.sym2idx['<UNK>'] elif '<unk>' in self.sym2idx: self.unk_idx = self.sym2idx['<unk>'] else: raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement.') def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['pretrained_vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'wb') as f: pickle.dump(self.__dict__, f) return (vocab_file,) def build_vocab(self): if self.vocab_file: logger.info(f'building vocab from {self.vocab_file}') self._build_from_file(self.vocab_file) logger.info(f'Final vocab size {len(self.sym2idx)}') else: logger.info(f'building vocab with min_freq={self.min_freq}, max_size={self.max_size}') self.idx2sym = [] self.sym2idx = OrderedDict() for sym in self.special: self.add_special(sym) for sym, cnt in self.counter.most_common(self.max_size): if cnt < self.min_freq: break self.add_symbol(sym) logger.info(f'Final vocab size {len(self.sym2idx)} from {len(self.counter)} unique tokens') @torch_only_method def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False): if verbose: logger.info(f'encoding file {path} ...') assert os.path.exists(path), f'Input file {path} not found' encoded = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and (idx % 500000 == 0): logger.info(f' line {idx}') symbols = self.tokenize(line, 
add_eos=add_eos, add_double_eos=add_double_eos) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded @torch_only_method def encode_sents(self, sents, ordered=False, verbose=False): if verbose: logger.info(f'encoding {len(sents)} sents ...') encoded = [] for idx, symbols in enumerate(sents): if verbose and idx > 0 and (idx % 500000 == 0): logger.info(f' line {idx}') encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def add_special(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 setattr(self, f"{sym.strip('<>')}_idx", self.sym2idx[sym]) def add_symbol(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 def move_added_token(self, token: str, target_idx: int): """ Moves an added token to a specific position in the vocab. This method should be used when resizing an embedding layer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer from the default position (at the very end) to the desired one. Args: token: The token to move to a specific position in the vocab. target_idx: The position where the token should be moved to. """ assert token in self.added_tokens_encoder, 'Token which should be moved has to be an added token' assert token not in self.idx2sym, 'Token which should be moved is already in vocab' self.idx2sym.insert(target_idx, token) self.sym2idx[token] = target_idx for idx in range(target_idx + 1, len(self.idx2sym)): current_sym = self.idx2sym[idx] self.sym2idx[current_sym] = idx old_index = self._added_tokens_encoder.pop(token) self._added_tokens_decoder.pop(old_index) def moses_punct_norm(self, text): return self.moses_punct_normalizer.normalize(text) def moses_tokenize(self, text): return self.moses_tokenizer.tokenize(text, aggressive_dash_splits=True, return_str=False, escape=False, protected_patterns=self.never_split) def moses_pipeline(self, text: str) -> list[str]: """ Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with *aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large comma-separated numbers and floating point values are split. E.g. 
"23,000 people are 1.80m tall" -> "23 @,@ 000 people are 1 @.@ 80m tall" Args: text: Text to be tokenize Returns: A list of tokenized string Example: ```python >>> tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103") >>> tokenizer.moses_pipeline("23,000 people are 1.80 m tall") ['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall'] ```""" text = self.moses_punct_norm(text) text = self.moses_tokenize(text) text = tokenize_numbers(text) return text def _convert_id_to_token(self, idx): """Converts an id in a token (BPE) using the vocab.""" assert 0 <= idx < len(self), f'Index {idx} out of vocabulary range' return self.idx2sym[idx] def _convert_token_to_id(self, sym): """Converts a token (str) in an id using the vocab.""" if sym in self.sym2idx: return self.sym2idx[sym] elif hasattr(self, 'unk_idx'): return self.sym2idx.get(sym, self.unk_idx) elif '<unk>' in self.sym2idx: return self.sym2idx['<unk>'] elif '<UNK>' in self.sym2idx: return self.sym2idx['<UNK>'] else: raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement.') def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. Additionally, the split numbers are converted back into it's original form. """ out_string = self.moses_detokenizer.detokenize(tokens) return detokenize_numbers(out_string).strip() @torch_only_method def convert_to_tensor(self, symbols): return torch.LongTensor(self.convert_tokens_to_ids(symbols)) @property def vocab_size(self): return len(self.idx2sym) def get_vocab(self): vocab = self.sym2idx.copy() vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, line, add_eos=False, add_double_eos=False): line = line.strip() if self.lower_case: line = line.lower() if self.delimiter == '': symbols = line else: symbols = self.moses_pipeline(line) if add_double_eos: return ['<S>'] + symbols + ['<S>'] elif add_eos: return symbols + ['<eos>'] else: return symbols
class TransfoXLTokenizer(PreTrainedTokenizer): ''' Construct a Transformer-XL tokenizer adapted from Vocab class in [the original code](https://github.com/kimiyoung/transformer-xl). The Transformer-XL tokenizer is a word-level tokenizer (no sub-word tokenization). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: special (`list[str]`, *optional*): A list of special tokens (to be treated by the original implementation of this tokenizer). min_freq (`int`, *optional*, defaults to 0): The minimum number of times a token has to be present in order to be kept in the vocabulary (otherwise it will be mapped to `unk_token`). max_size (`int`, *optional*): The maximum size of the vocabulary. If left unset, it will default to the size of the vocabulary found after excluding the tokens according to the `min_freq` rule. lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. delimiter (`str`, *optional*): The delimiter used between tokens. vocab_file (`str`, *optional*): File containing the vocabulary (from the original implementation). pretrained_vocab_file (`str`, *optional*): File containing the vocabulary as saved with the `save_pretrained()` method. never_split (`list[str]`, *optional*): List of tokens that should never be split. If no list is specified, will simply use the existing special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. eos_token (`str`, *optional*, defaults to `"<eos>"`): The end of sequence token. additional_special_tokens (`list[str]`, *optional*, defaults to `['<formula>']`): A list of additional special tokens (for the HuggingFace functionality). language (`str`, *optional*, defaults to `"en"`): The language of this tokenizer (used for moses preprocessing). ''' def __init__(self, special=None, min_freq=0, max_size=None, lower_case=False, delimiter=None, vocab_file=None, pretrained_vocab_file: Optional[str]=None, never_split=None, unk_token='<unk>', eos_token='<eos>', additional_special_tokens=['<formula>'], language='en', **kwargs): pass @property def do_lower_case(self): pass def _compile_space_around_punctuation_pattern(self): pass def count_file(self, path, verbose=False, add_eos=False): pass def count_sents(self, sents, verbose=False): ''' sents : a list of sentences, each a list of tokenized symbols ''' pass def _build_from_file(self, vocab_file): pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass def build_vocab(self): pass @torch_only_method def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False): pass @torch_only_method def encode_sents(self, sents, ordered=False, verbose=False): pass def add_special(self, sym): pass def add_symbol(self, sym): pass def move_added_token(self, token: str, target_idx: int): ''' Moves an added token to a specific position in the vocab. This method should be used when resizing an embedding layer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer from the default position (at the very end) to the desired one. Args: token: The token to move to a specific position in the vocab. target_idx: The position where the token should be moved to. 
''' pass def moses_punct_norm(self, text): pass def moses_tokenize(self, text): pass def moses_pipeline(self, text: str) -> list[str]: ''' Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with *aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large comma-separated numbers and floating point values are split. E.g. "23,000 people are 1.80m tall" -> "23 @,@ 000 people are 1 @.@ 80m tall" Args: text: Text to be tokenized Returns: A list of tokenized strings Example: ```python >>> tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103") >>> tokenizer.moses_pipeline("23,000 people are 1.80 m tall") ['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall'] ```''' pass def _convert_id_to_token(self, idx): '''Converts an id to a token (BPE) using the vocab.''' pass def _convert_token_to_id(self, sym): '''Converts a token (str) to an id using the vocab.''' pass def convert_tokens_to_string(self, tokens): ''' Converts a sequence of tokens (string) into a single string. Additionally, the split numbers are converted back into their original form. ''' pass @torch_only_method def convert_to_tensor(self, symbols): pass @property def vocab_size(self): pass def get_vocab(self): pass def _tokenize(self, line, add_eos=False, add_double_eos=False): pass
29
7
14
1
11
2
3
0.32
1
10
0
0
23
18
23
112
398
53
262
94
218
85
193
68
169
13
3
4
70
1,993
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tvlt/configuration_tvlt.py
transformers.models.deprecated.tvlt.configuration_tvlt.TvltConfig
from ....configuration_utils import PretrainedConfig class TvltConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`TvltModel`]. It is used to instantiate a TVLT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TVLT [ZinengTang/tvlt-base](https://huggingface.co/ZinengTang/tvlt-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. spectrogram_length (`int`, *optional*, defaults to 2048): The time length of each audio spectrogram. frequency_length (`int`, *optional*, defaults to 128): The frequency length of audio spectrogram. image_patch_size (`list[int]`, *optional*, defaults to `[16, 16]`): The size (resolution) of each image patch. audio_patch_size (`list[int]`, *optional*, defaults to `[16, 16]`): The size (resolution) of each audio patch. num_image_channels (`int`, *optional*, defaults to 3): The number of input image channels. num_audio_channels (`int`, *optional*, defaults to 1): The number of input audio channels. num_frames (`int`, *optional*, defaults to 8): The maximum number of frames for an input video. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. use_mean_pooling (`bool`, *optional*, defaults to `False`): Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token. decoder_num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the decoder. decoder_hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the decoder. decoder_num_hidden_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the decoder. decoder_intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder. pixel_mask_ratio (`float`, *optional*, defaults to 0.75): Image patch masking ratio. 
audio_mask_ratio (`float`, *optional*, defaults to 0.15): Audio patch masking ratio. audio_mask_type (`str`, *optional*, defaults to `"frame-level"`): Audio patch masking type, choose between "frame-level" and "patch-level". task_matching (`bool`, *optional*, defaults to `True`): Whether to use vision audio matching task in pretraining. task_mae (`bool`, *optional*, defaults to `True`): Whether to use the masked auto-encoder (MAE) in pretraining. loss_type (`str`, *optional*, defaults to `"classification"`): Loss types including regression and classification. Example: ```python >>> from transformers import TvltConfig, TvltModel >>> # Initializing a TVLT ZinengTang/tvlt-base style configuration >>> configuration = TvltConfig() >>> # Initializing a model (with random weights) from the ZinengTang/tvlt-base style configuration >>> model = TvltModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'tvlt' def __init__(self, image_size=224, spectrogram_length=2048, frequency_length=128, image_patch_size=[16, 16], audio_patch_size=[16, 16], num_image_channels=3, num_audio_channels=1, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, use_mean_pooling=False, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, pixel_mask_ratio=0.75, audio_mask_ratio=0.15, audio_mask_type='frame-level', task_matching=True, task_mae=True, loss_type='classification', **kwargs): super().__init__(**kwargs) if audio_mask_type not in ('frame-level', 'patch-level'): raise ValueError(f"audio_mask_type must be one of the two acceptable strategies - 'frame-level' or 'patch-level' - got {audio_mask_type}") self.image_size = image_size self.spectrogram_length = spectrogram_length self.frequency_length = frequency_length self.image_patch_size = image_patch_size self.audio_patch_size = audio_patch_size self.num_image_channels = num_image_channels self.num_audio_channels = num_audio_channels self.num_frames = num_frames self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.use_mean_pooling = use_mean_pooling self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_hidden_size = decoder_hidden_size self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.pixel_mask_ratio = pixel_mask_ratio self.audio_mask_ratio = audio_mask_ratio self.audio_mask_type = audio_mask_type self.task_matching = task_matching self.task_mae = task_mae self.loss_type = loss_type
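Not part of the extracted record - a minimal sketch of the config above, showing that the constructor validates `audio_mask_type` before storing fields (this assumes the corrected membership check, which accepts both documented spellings 'frame-level' and 'patch-level'):
```python
config = TvltConfig(audio_mask_type="patch-level", num_frames=4)
print(config.audio_mask_type, config.hidden_size)  # patch-level 768

try:
    TvltConfig(audio_mask_type="token-level")  # rejected by the __init__ check
except ValueError as err:
    print(err)
```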
class TvltConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`TvltModel`]. It is used to instantiate a TVLT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TVLT [ZinengTang/tvlt-base](https://huggingface.co/ZinengTang/tvlt-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. spectrogram_length (`int`, *optional*, defaults to 2048): The time length of each audio spectrogram. frequency_length (`int`, *optional*, defaults to 128): The frequency length of audio spectrogram. image_patch_size (`list[int]`, *optional*, defaults to `[16, 16]`): The size (resolution) of each image patch. audio_patch_size (`list[int]`, *optional*, defaults to `[16, 16]`): The size (resolution) of each audio patch. num_image_channels (`int`, *optional*, defaults to 3): The number of input image channels. num_audio_channels (`int`, *optional*, defaults to 1): The number of input audio channels. num_frames (`int`, *optional*, defaults to 8): The maximum number of frames for an input video. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. use_mean_pooling (`bool`, *optional*, defaults to `False`): Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token. decoder_num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the decoder. decoder_hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the decoder. decoder_num_hidden_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the decoder. decoder_intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder. pixel_mask_ratio (`float`, *optional*, defaults to 0.75): Image patch masking ratio. audio_mask_ratio (`float`, *optional*, defaults to 0.15): Audio patch masking ratio. 
audio_mask_type (`str`, *optional*, defaults to `"frame-level"`): Audio patch masking type, choose between "frame-level" and "patch-level". task_matching (`bool`, *optional*, defaults to `True`): Whether to use vision audio matching task in pretraining. task_mae (`bool`, *optional*, defaults to `True`): Whether to use the masked auto-encoder (MAE) in pretraining. loss_type (`str`, *optional*, defaults to `"classification"`): Loss types including regression and classification. Example: ```python >>> from transformers import TvltConfig, TvltModel >>> # # Initializing a TVLT ZinengTang/tvlt-base style configuration >>> configuration = TvltConfig() >>> # # Initializing a model (with random weights) from the ZinengTang/tvlt-base style configuration >>> model = TvltModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, image_size=224, spectrogram_length=2048, frequency_length=128, image_patch_size=[16, 16], audio_patch_size=[16, 16], num_image_channels=3, num_audio_channels=1, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, use_mean_pooling=False, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, pixel_mask_ratio=0.75, audio_mask_ratio=0.15, audio_mask_type='frame-level', task_matching=True, task_mae=True, loss_type='classification', **kwargs): pass
2
1
73
5
68
0
2
1.1
1
2
0
0
1
29
1
33
161
14
70
64
36
77
35
32
33
2
2
1
2
1,994
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tvlt/feature_extraction_tvlt.py
transformers.models.deprecated.tvlt.feature_extraction_tvlt.TvltFeatureExtractor
from ....feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from math import ceil import numpy as np from typing import Optional, Union from ....utils import TensorType, logging from ....audio_utils import mel_filter_bank, spectrogram, window_function class TvltFeatureExtractor(SequenceFeatureExtractor): """ Constructs a TVLT audio feature extractor. This feature extractor can be used to prepare audios for the model. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: spectrogram_length (`int`, *optional*, defaults to 2048): The time length of each audio spectrogram. num_channels (`int`, *optional*, defaults to 1): Number of audio channels. patch_size (`list[int]`, *optional*, defaults to `[16, 16]`): The patch size of audio patch embedding. feature_size (`int`, *optional*, defaults to 128): The frequency length of audio spectrogram. sampling_rate (`int`, *optional*, defaults to 44100): The sampling rate at which the audio files should be digitized, expressed in Hertz (Hz). hop_length_to_sampling_rate (`int`, *optional*, defaults to 86): Hop length is the length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients. For example, with sampling rate 44100, the hop length is 512, with 44100 / 512 = 86 n_fft (`int`, *optional*, defaults to 2048): Size of the Fourier transform. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. Should correspond to silences. """ model_input_names = ['audio_values', 'audio_mask'] def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.spectrogram_length = spectrogram_length self.num_channels = num_channels self.patch_size = patch_size self.freq_len = feature_size // self.patch_size[1] self.n_fft = n_fft self.hop_length = sampling_rate // hop_length_to_sampling_rate self.sampling_rate = sampling_rate self.padding_value = padding_value self.mel_filters = mel_filter_bank(num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney').T def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray: """ Compute the log-mel spectrogram of the provided audio, gives similar results to Whisper's original torch implementation with 1e-5 tolerance. """ log_spec = spectrogram(waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0) log_spec = log_spec[:, :-1] log_spec = log_spec - 20.0 log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0 return log_spec def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=True, sampling_rate: Optional[int]=None, resample: bool=False, mask_audio: bool=False, **kwargs) -> BatchFeature: """ Main method to prepare one or several audio(s) for the model. 
Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) <Tip> For TvltTransformer models, `attention_mask` should always be passed for batched inference, to avoid subtle bugs. </Tip> sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech recognition pipeline. The current model supports sampling rates 16000 and 44100. resample (`bool`, *optional*, defaults to `False`): If the sampling rate is not matched, resample the input audio to match. mask_audio (`bool`, *optional*, defaults to `False`): Whether or not to mask input audio for MAE task. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **audio_values** -- Audio values to be fed to a model, of shape (batch_size, num_channels, height, width). - **audio_mask** -- Audio masks to be fed to a model, of shape (batch_size, num_audio_patches). """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'This feature extractor is set to support sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. 
Failing to do so can result in silent errors that might be hard to debug.') is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))) if is_batched: raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech] elif not is_batched and (not isinstance(raw_speech, np.ndarray)): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) if not is_batched: raw_speech = [np.asarray([raw_speech]).T] audio_features = [self._np_extract_fbank_features(waveform.squeeze()).T[:self.spectrogram_length] for waveform in raw_speech] if isinstance(audio_features[0], list): audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features] max_patch_len = max([ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) if return_attention_mask: audio_mask = [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0] for feature in audio_features] audio_mask = np.array(audio_mask).astype(np.float32) max_time_len = max_patch_len // self.freq_len * self.patch_size[0] padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32) padded_audio_features = padded_audio_features * self.padding_value for i in range(len(audio_features)): feature = audio_features[i] padded_audio_features[i, :, :feature.shape[0], :] = feature if return_attention_mask: data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: data = {'audio_values': padded_audio_features} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs
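Not part of the extracted record - a minimal sketch of the extractor above, padding two mono clips of different lengths to a shared patch grid (the clip contents are random placeholders):
```python
import numpy as np

extractor = TvltFeatureExtractor()

# Two mono clips at the extractor's default 44.1 kHz rate.
clips = [
    np.random.randn(44100).astype(np.float32),      # 1 second
    np.random.randn(2 * 44100).astype(np.float32),  # 2 seconds
]
batch = extractor(clips, sampling_rate=44100, return_tensors="np")
print(batch["audio_values"].shape)  # (2, 1, max_time_len, 128): padded log-mel spectrograms
print(batch["audio_mask"].shape)    # (2, max_patch_len): 1 for real patches, 0 for padding
```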
class TvltFeatureExtractor(SequenceFeatureExtractor): ''' Constructs a TVLT audio feature extractor. This feature extractor can be used to prepare audios for the model. This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: spectrogram_length (`int`, *optional*, defaults to 2048): The time length of each audio spectrogram. num_channels (`int`, *optional*, defaults to 1): Number of audio channels. patch_size (`list[int]`, *optional*, defaults to `[16, 16]`): The patch size of audio patch embedding. feature_size (`int`, *optional*, defaults to 128): The frequency length of audio spectrogram. sampling_rate (`int`, *optional*, defaults to 44100): The sampling rate at which the audio files should be digitized, expressed in Hertz (Hz). hop_length_to_sampling_rate (`int`, *optional*, defaults to 86): Hop length is the length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients. For example, with sampling rate 44100, the hop length is 512, with 44100 / 512 = 86 n_fft (`int`, *optional*, defaults to 2048): Size of the Fourier transform. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. Should correspond to silences. ''' def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs): pass def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray: ''' Compute the log-mel spectrogram of the provided audio, gives similar results to Whisper's original torch implementation with 1e-5 tolerance. ''' pass def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=True, sampling_rate: Optional[int]=None, resample: bool=False, mask_audio: bool=False, **kwargs) -> BatchFeature: ''' Main method to prepare one or several audio(s) for the model. Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*, defaults to `True`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific feature_extractor's default. [What are attention masks?](../glossary#attention-mask) <Tip> For TvltTransformer models, `attention_mask` should always be passed for batched inference, to avoid subtle bugs. </Tip> sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech recognition pipeline. The current model supports sampling rates 16000 and 44100. resample (`bool`, *optional*, defaults to `False`): If the sampling rate is not matched, resample the input audio to match. 
mask_audio (`bool`, *optional*, defaults to `False`): Whether or not to mask input audio for MAE task. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **audio_values** -- Audio values to be fed to a model, of shape (batch_size, num_channels, height, width). - **audio_mask** -- Audio masks to be fed to a model, of shape (batch_size, num_audio_patches). ''' pass
4
3
57
5
38
14
5
0.57
1
10
1
0
3
9
3
20
201
22
115
45
91
66
52
25
48
12
3
2
14
1,995
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tvlt/image_processing_tvlt.py
transformers.models.deprecated.tvlt.image_processing_tvlt.TvltImageProcessor
from ....utils import TensorType, logging from ....image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict import numpy as np from typing import Optional, Union from ....image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format from ....image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments class TvltImageProcessor(BaseImageProcessor): """ Constructs a TVLT image processor. This processor can be used to prepare either videos or images for the model by converting images to 1-frame videos. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the output image after resizing. The shortest edge of the image will be resized to `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by `size` in the `preprocess` method. patch_size (`list[int]` *optional*, defaults to [16,16]): The patch size of image patch embedding. num_frames (`int` *optional*, defaults to 8): The maximum number of video frames. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to 1/255): Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ['pixel_values', 'pixel_mask', 'pixel_values_mixed', 'pixel_mask_mixed'] def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, patch_size: list[int]=[16, 16], num_frames: int=8, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=IMAGENET_STANDARD_MEAN, image_std: Optional[Union[float, list[float]]]=IMAGENET_STANDARD_STD, init_mask_generator=False, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, param_name='crop_size') self.do_resize = do_resize self.size = size self.patch_size = patch_size self.num_frames = num_frames self.do_center_crop = do_center_crop self.crop_size = crop_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self._valid_processor_keys = ['videos', 'do_resize', 'size', 'patch_size', 'num_frames', 'resample', 'do_center_crop', 'crop_size', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'is_mixed', 'return_tensors', 'data_format', 'input_data_format'] def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its shortest edge of length `s` while keeping the aspect ratio of the original image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=False) if 'shortest_edge' in size: output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False, input_data_format=input_data_format) elif 'height' in size and 'width' in size: output_size = (size['height'], size['width']) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}") return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: """Preprocesses a single image.""" validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) image = to_numpy_array(image) if do_rescale and is_scaled_image(image): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def preprocess(self, videos: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, patch_size: Optional[list[int]]=None, num_frames: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, is_mixed: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature: """ Preprocess an videos or image or batch of videos or images. Args: videos (`ImageInput`): Images or videos to preprocess. Expects a single or batch of frames with pixel values ranging from 0 to 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. patch_size (`list[int]` *optional*, defaults to self.patch_size): The patch size of image patch embedding. num_frames (`int` *optional*, defaults to self.num_frames): The maximum number of video frames. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. 
This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying the center crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values to the [0, 1] range. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. is_mixed (`bool`, *optional*): Whether the input video contains negative samples. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the inferred channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width). - **pixel_mask** -- Pixel masks to be fed to a model, of shape (batch_size, num_pixel_patches). - **pixel_values_mixed** -- Pixel values with both positive and negative samples to be fed to a model, of shape (batch_size, num_channels, height, width). - **pixel_mask_mixed** -- Pixel masks with both positive and negative samples to be fed to a model, of shape (batch_size, num_pixel_patches). 
""" do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size') patch_size = patch_size if patch_size is not None else self.patch_size num_frames = num_frames if patch_size is not None else self.num_frames validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(videos): raise ValueError('Invalid image or video type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') videos = make_batched(videos) for video in videos: if len(video) > self.num_frames: raise ValueError(f'number of frames must not be greater than the maximum frames of the model {self.num_frames}.') max_num_frames = max([len(video) for video in videos]) num_patches_per_image = (size['shortest_edge'] // patch_size[0]) ** 2 video_masks = np.array([len(video) * num_patches_per_image * [1] + (max_num_frames - len(video)) * num_patches_per_image * [0] for video in videos]) videos = [[self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for img in video] for video in videos] if is_mixed: data = {'pixel_values_mixed': videos, 'pixel_mask_mixed': video_masks} else: data = {'pixel_values': videos, 'pixel_mask': video_masks} return BatchFeature(data=data, tensor_type=return_tensors)
class TvltImageProcessor(BaseImageProcessor): ''' Constructs a TVLT image processor. This processor can be used to prepare either videos or images for the model by converting images to 1-frame videos. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`): Size of the output image after resizing. The shortest edge of the image will be resized to `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by `size` in the `preprocess` method. patch_size (`list[int]`, *optional*, defaults to `[16, 16]`): The patch size of image patch embedding. num_frames (`int`, *optional*, defaults to 8): The maximum number of video frames. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop` parameter in the `preprocess` method. crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`): Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to 1/255): Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. ''' def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, patch_size: list[int]=[16, 16], num_frames: int=8, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=IMAGENET_STANDARD_MEAN, image_std: Optional[Union[float, list[float]]]=IMAGENET_STANDARD_STD, init_mask_generator=False, **kwargs) -> None: pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image. Args: image (`np.ndarray`): Image to resize. 
size (`dict[str, int]`): Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its shortest edge of length `s` while keeping the aspect ratio of the original image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: '''Preprocesses a single image.''' pass def preprocess(self, videos: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, patch_size: Optional[list[int]]=None, num_frames: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, is_mixed: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature: ''' Preprocess a video or image, or a batch of videos or images. Args: videos (`ImageInput`): Images or videos to preprocess. Expects a single or batch of frames with pixel values ranging from 0 to 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after applying resize. patch_size (`list[int]`, *optional*, defaults to `self.patch_size`): The patch size of image patch embedding. num_frames (`int`, *optional*, defaults to `self.num_frames`): The maximum number of video frames. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after applying the center crop. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values to the [0, 1] range. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. 
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. is_mixed (`bool`, *optional*): Whether the input video contains negative samples. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the inferred channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width). - **pixel_mask** -- Pixel masks to be fed to a model, of shape (batch_size, num_pixel_patches). - **pixel_values_mixed** -- Pixel values with both positive and negative samples to be fed to a model, of shape (batch_size, num_channels, height, width). - **pixel_mask_mixed** -- Pixel masks with both positive and negative samples to be fed to a model, of shape (batch_size, num_pixel_patches). ''' pass
total_program_units: 5
total_doc_str: 4
AvgCountLine: 78
AvgCountLineBlank: 6
AvgCountLineCode: 52
AvgCountLineComment: 20
AvgCyclomatic: 8
CommentToCodeRatio: 0.57
CountClassBase: 1
CountClassCoupled: 8
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 4
CountDeclInstanceVariable: 13
CountDeclMethod: 4
CountDeclMethodAll: 24
CountLine: 366
CountLineBlank: 32
CountLineCode: 213
CountLineCodeDecl: 84
CountLineCodeExe: 149
CountLineComment: 121
CountStmt: 75
CountStmtDecl: 25
CountStmtExe: 70
MaxCyclomatic: 17
MaxInheritanceTree: 3
MaxNesting: 2
SumCyclomatic: 30
id: 1996
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tvlt/modeling_tvlt.py
class_name: transformers.models.deprecated.tvlt.modeling_tvlt.TvltAttention
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from torch import nn class TvltAttention(nn.Module): def __init__(self, config): super().__init__() self.attention = TvltSelfAttention(config) self.output = TvltSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): self_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
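A minimal sketch of the head-pruning mechanics used in prune_heads above, assuming the transformers utilities are importable; the head counts are illustrative, not tied to any TVLT checkpoint.

import torch
from torch import nn
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer

num_attention_heads, head_size = 12, 64
hidden = num_attention_heads * head_size  # 768

query = nn.Linear(hidden, hidden)
# Translate head ids into the flat indices of the rows that survive pruning.
heads, index = find_pruneable_heads_and_indices([0, 5], num_attention_heads, head_size, set())
query = prune_linear_layer(query, index)  # drops 2 * 64 output features
print(query.weight.shape)  # torch.Size([640, 768])

The output projection in TvltAttention is pruned along dim=1 instead, since pruned heads remove its input features rather than its outputs.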
class TvltAttention(nn.Module): def __init__(self, config): pass def prune_heads(self, heads): pass def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): pass
total_program_units: 4
total_doc_str: 0
AvgCountLine: 10
AvgCountLineBlank: 1
AvgCountLineCode: 8
AvgCountLineComment: 1
AvgCyclomatic: 1
CommentToCodeRatio: 0.13
CountClassBase: 1
CountClassCoupled: 4
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 3
CountDeclInstanceVariable: 3
CountDeclMethod: 3
CountDeclMethodAll: 13
CountLine: 32
CountLineBlank: 6
CountLineCode: 24
CountLineCodeDecl: 11
CountLineCodeExe: 20
CountLineComment: 3
CountStmt: 22
CountStmtDecl: 11
CountStmtExe: 18
MaxCyclomatic: 2
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 4
id: 1997
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tvlt/modeling_tvlt.py
class_name: transformers.models.deprecated.tvlt.modeling_tvlt.TvltAudioEmbeddings
import torch from torch import nn class TvltAudioEmbeddings(nn.Module): """Construct the patch and position embeddings.""" def __init__(self, config): super().__init__() self.patch_embeddings = TvltAudioPatchEmbeddings(config) self.num_patches = self.patch_embeddings.num_patches self.type_embed_a = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.num_freq_patches = config.frequency_length // config.audio_patch_size[1] self.pos_embed_a = nn.Parameter(torch.zeros(1, self.num_patches // self.num_freq_patches, config.hidden_size)) self.freq_embed = nn.Parameter(torch.zeros(1, self.num_freq_patches, config.hidden_size)) self.config = config def forward(self, audio_values, attention_masks=None): embeddings = self.patch_embeddings(audio_values) num_time_patches = embeddings.size(1) // self.num_freq_patches embeddings += self.freq_embed.repeat(1, num_time_patches, 1) embeddings += torch.repeat_interleave(self.pos_embed_a[:, :num_time_patches], self.num_freq_patches, dim=1) embeddings += self.type_embed_a return (embeddings, attention_masks)
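A self-contained sketch of how forward above tiles its two position embeddings over the patch sequence; the sizes here are made up for illustration.

import torch

hidden_size, num_freq_patches, num_time_patches = 4, 3, 2

freq_embed = torch.randn(1, num_freq_patches, hidden_size)   # one row per frequency bin
pos_embed_a = torch.randn(1, num_time_patches, hidden_size)  # one row per time step

# Patches arrive time-major, frequency-minor, so the frequency embedding
# repeats as a whole block for each time step ...
freq_part = freq_embed.repeat(1, num_time_patches, 1)                       # (1, 6, 4)
# ... while each time embedding is stretched across its frequency bins.
time_part = torch.repeat_interleave(pos_embed_a, num_freq_patches, dim=1)   # (1, 6, 4)

positions = freq_part + time_part  # added to the patch embeddings in forward()
print(positions.shape)  # torch.Size([1, 6, 4])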
class TvltAudioEmbeddings(nn.Module): '''Construct the patch and position embeddings.''' def __init__(self, config): pass def forward(self, audio_values, attention_masks=None): pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 12
AvgCountLineBlank: 3
AvgCountLineCode: 9
AvgCountLineComment: 1
AvgCyclomatic: 1
CommentToCodeRatio: 0.11
CountClassBase: 1
CountClassCoupled: 2
CountClassCoupledModified: 1
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 7
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 27
CountLineBlank: 7
CountLineCode: 18
CountLineCodeDecl: 12
CountLineCodeExe: 15
CountLineComment: 2
CountStmt: 18
CountStmtDecl: 12
CountStmtExe: 15
MaxCyclomatic: 1
MaxInheritanceTree: 1
MaxNesting: 0
SumCyclomatic: 2
id: 1998
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tvlt/modeling_tvlt.py
class_name: transformers.models.deprecated.tvlt.modeling_tvlt.TvltAudioPatchEmbeddings
import collections.abc from torch import nn import torch class TvltAudioPatchEmbeddings(nn.Module): """ This class turns `audio_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config): super().__init__() spectrogram_length, frequency_length, patch_size = (config.spectrogram_length, config.frequency_length, config.audio_patch_size) num_channels, hidden_size = (config.num_audio_channels, config.hidden_size) spectrogram_size = (spectrogram_length, frequency_length) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = spectrogram_size[1] // patch_size[1] * (spectrogram_size[0] // patch_size[0]) patch_shape = (spectrogram_size[0] // patch_size[0], spectrogram_size[1] // patch_size[1]) self.spectrogram_size = spectrogram_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.patch_shape = patch_shape self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, audio_values: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = audio_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.') if height > self.spectrogram_size[0] or width != self.spectrogram_size[1]: raise ValueError(f"Input audio size ({height}*{width}) doesn't match model ({self.spectrogram_size[0]}*{self.spectrogram_size[1]}).") embeddings = self.projection(audio_values).flatten(2).transpose(1, 2) return embeddings
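A self-contained sketch of the Conv2d patchification above; the spectrogram and patch sizes are illustrative, not the model's real configuration.

import torch
from torch import nn

num_channels, hidden_size = 1, 8
spectrogram_size, patch_size = (32, 16), (16, 16)  # (time, frequency)

projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

audio_values = torch.randn(2, num_channels, *spectrogram_size)  # (batch, C, T, F)
# Conv output is (batch, hidden, T//pT, F//pF); flatten + transpose yields
# (batch, num_patches, hidden) with num_patches = (32 // 16) * (16 // 16) = 2.
embeddings = projection(audio_values).flatten(2).transpose(1, 2)
print(embeddings.shape)  # torch.Size([2, 2, 8])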
class TvltAudioPatchEmbeddings(nn.Module): ''' This class turns `audio_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. ''' def __init__(self, config): pass def forward(self, audio_values: torch.Tensor) -> torch.Tensor: pass
total_program_units: 3
total_doc_str: 1
AvgCountLine: 17
AvgCountLineBlank: 2
AvgCountLineCode: 16
AvgCountLineComment: 0
AvgCyclomatic: 3
CommentToCodeRatio: 0.16
CountClassBase: 1
CountClassCoupled: 4
CountClassCoupledModified: 0
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 6
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 42
CountLineBlank: 5
CountLineCode: 32
CountLineCodeDecl: 16
CountLineCodeExe: 29
CountLineComment: 5
CountStmt: 23
CountStmtDecl: 16
CountStmtExe: 20
MaxCyclomatic: 3
MaxInheritanceTree: 1
MaxNesting: 1
SumCyclomatic: 5
id: 1999
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/tvlt/modeling_tvlt.py
class_name: transformers.models.deprecated.tvlt.modeling_tvlt.TvltDecoder
from copy import deepcopy from torch import nn class TvltDecoder(nn.Module): def __init__(self, config): super().__init__() decoder_config = deepcopy(config) decoder_config.hidden_size = config.decoder_hidden_size decoder_config.num_hidden_layers = config.decoder_num_hidden_layers decoder_config.num_attention_heads = config.decoder_num_attention_heads decoder_config.intermediate_size = config.decoder_intermediate_size self.decoder_layers = nn.ModuleList([TvltLayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]) self.layernorm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False self.config = config def forward(self, hidden_states, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.decoder_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.layernorm(hidden_states) if not return_dict: return tuple((v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)) return TvltDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions)
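A minimal sketch of how __init__ above derives the decoder's configuration from the encoder's; SimpleNamespace stands in for the real TvltConfig, and the field values are illustrative.

from copy import deepcopy
from types import SimpleNamespace

config = SimpleNamespace(
    hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
    intermediate_size=3072, decoder_hidden_size=512,
    decoder_num_hidden_layers=8, decoder_num_attention_heads=8,
    decoder_intermediate_size=2048, layer_norm_eps=1e-6,
)

# Copy the encoder config, then overwrite the four size-defining fields so
# TvltLayer(decoder_config) builds narrower, shallower layers for the decoder.
decoder_config = deepcopy(config)
decoder_config.hidden_size = config.decoder_hidden_size
decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
decoder_config.num_attention_heads = config.decoder_num_attention_heads
decoder_config.intermediate_size = config.decoder_intermediate_size
print(decoder_config.hidden_size, config.hidden_size)  # 512 768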
class TvltDecoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states, output_attentions=False, output_hidden_states=False, return_dict=True): pass
total_program_units: 3
total_doc_str: 0
AvgCountLine: 27
AvgCountLineBlank: 5
AvgCountLineCode: 22
AvgCountLineComment: 1
AvgCyclomatic: 5
CommentToCodeRatio: 0.05
CountClassBase: 1
CountClassCoupled: 6
CountClassCoupledModified: 2
CountClassDerived: 0
CountDeclInstanceMethod: 2
CountDeclInstanceVariable: 4
CountDeclMethod: 2
CountDeclMethodAll: 12
CountLine: 56
CountLineBlank: 10
CountLineCode: 44
CountLineCodeDecl: 20
CountLineCodeExe: 35
CountLineComment: 2
CountStmt: 30
CountStmtDecl: 13
CountStmtExe: 27
MaxCyclomatic: 9
MaxInheritanceTree: 1
MaxNesting: 2
SumCyclomatic: 10